[SCM] lucene-solr packaging branch, upstream, updated. upstream/3.6.1+dfsg-1-gb1d7904

James Page james.page at ubuntu.com
Mon Jan 7 14:00:32 UTC 2013


The following commit has been merged in the upstream branch:
commit b1d7904ca96af7e8bcf91978004d0bab939b2fac
Author: James Page <james.page at ubuntu.com>
Date:   Mon Jan 7 12:31:09 2013 +0000

    Imported Upstream version 3.6.2+dfsg

diff --git a/build.xml b/build.xml
index 76cbe94..83ac708 100644
--- a/build.xml
+++ b/build.xml
@@ -69,7 +69,7 @@
     </sequential>
   </target>
 
-  <property name="version" value="3.6.1-SNAPSHOT"/>
+  <property name="version" value="3.6.2-SNAPSHOT"/>
   <target name="get-maven-poms"
           description="Copy Maven POMs from dev-tools/maven/ to their target locations">
     <copy todir="." overwrite="true">
diff --git a/dev-tools/eclipse/dot.project b/dev-tools/eclipse/dot.project
index de6b15e..0c6d3b3 100755
--- a/dev-tools/eclipse/dot.project
+++ b/dev-tools/eclipse/dot.project
@@ -1,6 +1,6 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <projectDescription>
-	<name>lucene_solr</name>
+	<name>lucene_solr_3_6</name>
 	<comment></comment>
 	<projects>
 	</projects>
diff --git a/dev-tools/idea/.idea/workspace.xml b/dev-tools/idea/.idea/workspace.xml
index 4240494..6f0dca8 100644
--- a/dev-tools/idea/.idea/workspace.xml
+++ b/dev-tools/idea/.idea/workspace.xml
@@ -124,7 +124,7 @@
       <module name="lucene" />
       <option name="TEST_OBJECT" value="package" />
       <option name="WORKING_DIRECTORY" value="file://$PROJECT_DIR$/lucene/build/lucene-idea" />
-      <option name="VM_PARAMETERS" value="-ea -Dlucene.version=3.6.1-SNAPSHOT -DtempDir=temp" />
+      <option name="VM_PARAMETERS" value="-ea -Dlucene.version=3.6.2-SNAPSHOT -DtempDir=temp" />
       <option name="TEST_SEARCH_SCOPE"><value defaultName="singleModule" /></option>
     </configuration>
     <configuration default="false" name="memory contrib" type="JUnit" factoryName="JUnit">
diff --git a/dev-tools/scripts/smokeTestRelease.py b/dev-tools/scripts/smokeTestRelease.py
index 9a408f6..eb9bcf5 100644
--- a/dev-tools/scripts/smokeTestRelease.py
+++ b/dev-tools/scripts/smokeTestRelease.py
@@ -440,7 +440,7 @@ def verifyUnpacked(project, artifact, unpackPath, version, tmpDir):
     run('%s; ant validate' % javaExe('1.7'), '%s/validate.log' % unpackPath)
 
     print '    run "ant rat-sources"'
-    run('%s; ant -lib %s/apache-rat-0.8.jar rat-sources' % (javaExe('1.7'), tmpDir), '%s/rat-sources.log' % unpackPath)
+    run('%s; ant -lib %s/apache-rat-0.8.jar/apache-rat-0.8/apache-rat-0.8.jar rat-sources' % (javaExe('1.7'), tmpDir), '%s/rat-sources.log' % unpackPath)
     
     if project == 'lucene':
       print '    run tests w/ Java 5...'
diff --git a/lucene/CHANGES.txt b/lucene/CHANGES.txt
index ab1e780..1a97a25 100644
--- a/lucene/CHANGES.txt
+++ b/lucene/CHANGES.txt
@@ -3,6 +3,50 @@ Lucene Change Log
 For more information on past and future Lucene versions, please see:
 http://s.apache.org/luceneversions
 
+======================= Lucene 3.6.2 =======================
+
+Bug Fixes
+
+* LUCENE-4234: Exception when FacetsCollector is used with ScoreFacetRequest, 
+  and the number of matching documents is too large. (Gilad Barkai via Shai Erera)
+
+* LUCENE-2686, LUCENE-3505, LUCENE-4401: Fix BooleanQuery scorers to 
+  return correct freq().
+  (Koji Sekiguchi, Mike McCandless, Liu Chao, Robert Muir)
+
+* LUCENE-2501: Fixed rare thread-safety issue that could cause
+  ArrayIndexOutOfBoundsException inside ByteBlockPool (Robert Muir,
+  Mike McCandless)
+
+* LUCENE-4297: BooleanScorer2 would multiply the coord() factor
+  twice for conjunctions: for most users this is no problem, but
+  if you had a customized Similarity that returned something other
+  than 1 when overlap == maxOverlap (always the case for conjunctions),
+  then the score would be incorrect.  (Pascal Chollet, Robert Muir)
+
+* LUCENE-4300: BooleanQuery's rewrite was not always safe: if you
+  had a custom Similarity where coord(1,1) != 1F, then the rewritten
+  query would be scored differently.  (Robert Muir)
+
+* LUCENE-4398: If you index many different field names in your
+  documents, then due to a bug in how it measures its RAM
+  usage, IndexWriter would flush each segment too early, eventually
+  reaching the point where it flushes after every doc.  (Tim Smith via
+  Mike McCandless)
+
+* LUCENE-4411: when sampling is enabled for a FacetRequest, its depth
+  parameter is reset to the default (1), even if set otherwise.
+  (Gilad Barkai via Shai Erera)
+
+* LUCENE-4635: Fixed ArrayIndexOutOfBoundsException when in-memory
+  terms index requires more than 2.1 GB RAM (indices with billions of
+  terms).  (Tom Burton-West via Mike McCandless)
+  
+Documentation
+
+* LUCENE-4302: Fix facet userguide to have HTML loose doctype like
+  all other javadocs.  (Karl Nicholas via Uwe Schindler)
+
 ======================= Lucene 3.6.1 =======================
 More information about this release, including any errata related to the 
 release notes, upgrade instructions, or other changes may be found online at:
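
An illustrative sketch for the LUCENE-4297/LUCENE-4300 entries above (not
part of the commit): the bugs only bite when a custom Similarity returns
something other than 1.0f for coord(overlap, maxOverlap) with
overlap == maxOverlap, e.g.:

    import org.apache.lucene.search.DefaultSimilarity;

    public class DampedCoordSimilarity extends DefaultSimilarity {
      @Override
      public float coord(int overlap, int maxOverlap) {
        // != 1.0f even for pure conjunctions (overlap == maxOverlap):
        // BooleanScorer2 applied this factor twice (LUCENE-4297) and
        // BooleanQuery.rewrite dropped it for single clauses (LUCENE-4300)
        return 0.5f * ((float) overlap / maxOverlap);
      }
    }

    // usage, assuming an existing IndexSearcher `searcher`:
    //   searcher.setSimilarity(new DampedCoordSimilarity());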
diff --git a/lucene/backwards/src/test/org/apache/lucene/search/TestBooleanScorer.java b/lucene/backwards/src/test/org/apache/lucene/search/TestBooleanScorer.java
deleted file mode 100644
index 6f965af..0000000
--- a/lucene/backwards/src/test/org/apache/lucene/search/TestBooleanScorer.java
+++ /dev/null
@@ -1,170 +0,0 @@
-package org.apache.lucene.search;
-
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-import org.apache.lucene.document.Document;
-import org.apache.lucene.document.Field;
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.RandomIndexWriter;
-import org.apache.lucene.index.Term;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.LuceneTestCase;
-
-public class TestBooleanScorer extends LuceneTestCase
-{
-  private static final String FIELD = "category";
-  
-  public void testMethod() throws Exception {
-    Directory directory = newDirectory();
-
-    String[] values = new String[] { "1", "2", "3", "4" };
-
-    RandomIndexWriter writer = new RandomIndexWriter(random, directory);
-    for (int i = 0; i < values.length; i++) {
-      Document doc = new Document();
-      doc.add(newField(FIELD, values[i], Field.Store.YES, Field.Index.NOT_ANALYZED));
-      writer.addDocument(doc);
-    }
-    IndexReader ir = writer.getReader();
-    writer.close();
-
-    BooleanQuery booleanQuery1 = new BooleanQuery();
-    booleanQuery1.add(new TermQuery(new Term(FIELD, "1")), BooleanClause.Occur.SHOULD);
-    booleanQuery1.add(new TermQuery(new Term(FIELD, "2")), BooleanClause.Occur.SHOULD);
-
-    BooleanQuery query = new BooleanQuery();
-    query.add(booleanQuery1, BooleanClause.Occur.MUST);
-    query.add(new TermQuery(new Term(FIELD, "9")), BooleanClause.Occur.MUST_NOT);
-
-    IndexSearcher indexSearcher = newSearcher(ir);
-    ScoreDoc[] hits = indexSearcher.search(query, null, 1000).scoreDocs;
-    assertEquals("Number of matched documents", 2, hits.length);
-    indexSearcher.close();
-    ir.close();
-    directory.close();
-  }
-  
-  public void testEmptyBucketWithMoreDocs() throws Exception {
-    // This test checks the logic of nextDoc() when all sub scorers have docs
-    // beyond the first bucket (for example). Currently, the code relies on the
-    // 'more' variable to work properly, and this test ensures that if the logic
-    // changes, we have a test to back it up.
-    
-    Similarity sim = Similarity.getDefault();
-    Scorer[] scorers = new Scorer[] {new Scorer(sim) {
-      private int doc = -1;
-      @Override public float score() throws IOException { return 0; }
-      @Override public int docID() { return doc; }
-      
-      @Override public int nextDoc() throws IOException {
-        return doc = doc == -1 ? 3000 : NO_MORE_DOCS;
-      }
-
-      @Override public int advance(int target) throws IOException {
-        return doc = target <= 3000 ? 3000 : NO_MORE_DOCS;
-      }
-      
-    }};
-    BooleanScorer bs = new BooleanScorer(null, false, sim, 1, Arrays.asList(scorers), null, scorers.length);
-
-    final List<Integer> hits = new ArrayList<Integer>();
-    bs.score(new Collector() {
-      int docBase;
-      @Override
-      public void setScorer(Scorer scorer) {
-      }
-      
-      @Override
-      public void collect(int doc) throws IOException {
-        hits.add(docBase+doc);
-      }
-      
-      @Override
-      public void setNextReader(IndexReader reader, int docBase) {
-        this.docBase = docBase;
-      }
-      
-      @Override
-      public boolean acceptsDocsOutOfOrder() {
-        return true;
-      }
-      });
-
-    assertEquals("should have only 1 hit", 1, hits.size());
-    assertEquals("hit should have been docID=3000", 3000, hits.get(0).intValue());
-  }
-
-  public void testMoreThan32ProhibitedClauses() throws Exception {
-    final Directory d = newDirectory();
-    final RandomIndexWriter w = new RandomIndexWriter(random, d);
-    Document doc = new Document();
-    doc.add(new Field("field", "0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33", Field.Store.NO, Field.Index.ANALYZED));
-    w.addDocument(doc);
-    doc = new Document();
-    doc.add(new Field("field", "33", Field.Store.NO, Field.Index.ANALYZED));
-    w.addDocument(doc);
-    final IndexReader r = w.getReader();
-    w.close();
-    final IndexSearcher s = newSearcher(r);
-
-    final BooleanQuery q = new BooleanQuery();
-    for(int term=0;term<33;term++) {
-      q.add(new BooleanClause(new TermQuery(new Term("field", ""+term)),
-                              BooleanClause.Occur.MUST_NOT));
-    }
-    q.add(new BooleanClause(new TermQuery(new Term("field", "33")),
-                            BooleanClause.Occur.SHOULD));
-                            
-    final int[] count = new int[1];
-    s.search(q, new Collector() {
-      private Scorer scorer;
-    
-      @Override
-      public void setScorer(Scorer scorer) {
-        // Make sure we got BooleanScorer:
-        this.scorer = scorer;
-        assertEquals("Scorer is implemented by wrong class", BooleanScorer.class.getName() + "$BucketScorer", scorer.getClass().getName());
-      }
-      
-      @Override
-      public void collect(int doc) throws IOException {
-        count[0]++;
-      }
-      
-      @Override
-      public void setNextReader(IndexReader reader, int docBase) {
-      }
-      
-      @Override
-      public boolean acceptsDocsOutOfOrder() {
-        return true;
-      }
-    });
-
-    assertEquals(1, count[0]);
-    
-    s.close();
-    r.close();
-    d.close();
-  }
-}
diff --git a/lucene/common-build.xml b/lucene/common-build.xml
index ab11322..4d0e0ef 100644
--- a/lucene/common-build.xml
+++ b/lucene/common-build.xml
@@ -46,7 +46,7 @@
 
   <property name="name" value="${ant.project.name}"/>
   <property name="Name" value="Lucene"/>
-  <property name="dev.version" value="3.6.1-SNAPSHOT"/>
+  <property name="dev.version" value="3.6.2-SNAPSHOT"/>
   <property name="tests.luceneMatchVersion" value="3.6"/>
   <property name="version" value="${dev.version}"/>
   <property name="spec.version" value="${version}"/>	
diff --git a/lucene/contrib/CHANGES.txt b/lucene/contrib/CHANGES.txt
index 917a3d4..698fe57 100644
--- a/lucene/contrib/CHANGES.txt
+++ b/lucene/contrib/CHANGES.txt
@@ -3,6 +3,18 @@ Lucene contrib change Log
 For more information on past and future Lucene versions, please see:
 http://s.apache.org/luceneversions
 
+======================= Lucene 3.6.2 =======================
+
+Bug Fixes
+
+* LUCENE-4109: BooleanQueries are not parsed correctly with the
+  flexible query parser. (Karsten Rauch via Robert Muir)
+
+* LUCENE-4269: Deprecate BalancedSegmentMergePolicy (Mike McCandless)
+
+* LUCENE-4289: Fix minor idf inconsistencies/inefficiencies in highlighter.
+  (Robert Muir)
+
 ======================= Lucene 3.6.1 ================
 
 Bug Fixes
diff --git a/lucene/contrib/facet/src/java/org/apache/lucene/facet/doc-files/userguide.html b/lucene/contrib/facet/src/java/org/apache/lucene/facet/doc-files/userguide.html
index 8bc1790..dc9bc8a 100755
--- a/lucene/contrib/facet/src/java/org/apache/lucene/facet/doc-files/userguide.html
+++ b/lucene/contrib/facet/src/java/org/apache/lucene/facet/doc-files/userguide.html
@@ -1,4 +1,4 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
 <!--
  Licensed to the Apache Software Foundation (ASF) under one or more
  contributor license agreements.  See the NOTICE file distributed with
diff --git a/lucene/contrib/facet/src/java/org/apache/lucene/facet/search/FacetsCollector.java b/lucene/contrib/facet/src/java/org/apache/lucene/facet/search/FacetsCollector.java
index 7298be8..cea9089 100644
--- a/lucene/contrib/facet/src/java/org/apache/lucene/facet/search/FacetsCollector.java
+++ b/lucene/contrib/facet/src/java/org/apache/lucene/facet/search/FacetsCollector.java
@@ -71,12 +71,14 @@ public class FacetsCollector extends Collector {
   protected ScoredDocIdCollector initScoredDocCollector(
       FacetSearchParams facetSearchParams, IndexReader indexReader,
       TaxonomyReader taxonomyReader) {
+    boolean scoresNeeded = false;
     for (FacetRequest frq : facetSearchParams.getFacetRequests()) {
       if (frq.requireDocumentScore()) {
-        return ScoredDocIdCollector.create(1000, true);
+        scoresNeeded = true;
+        break;
       }
     }
-    return ScoredDocIdCollector.create(indexReader.maxDoc(), false);
+    return ScoredDocIdCollector.create(indexReader.maxDoc(), scoresNeeded);
   }
 
   /**
diff --git a/lucene/contrib/facet/src/java/org/apache/lucene/facet/search/ScoredDocIdCollector.java b/lucene/contrib/facet/src/java/org/apache/lucene/facet/search/ScoredDocIdCollector.java
index d24aaf2..c8c507f 100644
--- a/lucene/contrib/facet/src/java/org/apache/lucene/facet/search/ScoredDocIdCollector.java
+++ b/lucene/contrib/facet/src/java/org/apache/lucene/facet/search/ScoredDocIdCollector.java
@@ -8,7 +8,7 @@ import org.apache.lucene.search.DocIdSet;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.Scorer;
 import org.apache.lucene.util.ArrayUtil;
-import org.apache.lucene.util.OpenBitSet;
+import org.apache.lucene.util.FixedBitSet;
 
 /**
  * Licensed to the Apache Software Foundation (ASF) under one or more
@@ -51,7 +51,7 @@ public abstract class ScoredDocIdCollector extends Collector {
 
     @Override
     public void collect(int doc) throws IOException {
-      docIds.fastSet(docBase + doc);
+      docIds.set(docBase + doc);
       ++numDocIds;
     }
 
@@ -102,7 +102,9 @@ public abstract class ScoredDocIdCollector extends Collector {
     @SuppressWarnings("synthetic-access")
     public ScoringDocIdCollector(int maxDoc) {
       super(maxDoc);
-      scores = new float[maxDoc];
+      // only matching documents have an entry in the scores array. Therefore start with
+      // a small array and grow when needed.
+      scores = new float[64];
     }
 
     @Override
@@ -110,7 +112,7 @@ public abstract class ScoredDocIdCollector extends Collector {
 
     @Override
     public void collect(int doc) throws IOException {
-      docIds.fastSet(docBase + doc);
+      docIds.set(docBase + doc);
 
       float score = this.scorer.score();
       if (numDocIds >= scores.length) {
@@ -166,7 +168,7 @@ public abstract class ScoredDocIdCollector extends Collector {
 
   protected int numDocIds;
   protected int docBase;
-  protected final OpenBitSet docIds;
+  protected final FixedBitSet docIds;
 
   /**
    * Creates a new {@link ScoredDocIdCollector} with the given parameters.
@@ -186,7 +188,7 @@ public abstract class ScoredDocIdCollector extends Collector {
 
   private ScoredDocIdCollector(int maxDoc) {
     numDocIds = 0;
-    docIds = new OpenBitSet(maxDoc);
+    docIds = new FixedBitSet(maxDoc);
   }
 
   /** Returns the default score used when scoring is disabled. */
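
The ScoringDocIdCollector change above replaces an eager maxDoc-sized scores
array with a grow-on-demand one; a minimal sketch of that pattern (assumed
shape, using the ArrayUtil helper the class already imports):

    float[] scores = new float[64];
    int numDocIds = 0;

    void record(float score) {
      if (numDocIds >= scores.length) {
        // grow geometrically so memory tracks matches, not maxDoc
        scores = org.apache.lucene.util.ArrayUtil.grow(scores, numDocIds + 1);
      }
      scores[numDocIds++] = score;
    }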
diff --git a/lucene/contrib/facet/src/java/org/apache/lucene/facet/search/sampling/Sampler.java b/lucene/contrib/facet/src/java/org/apache/lucene/facet/search/sampling/Sampler.java
index 0f660eb..b8ba98c 100644
--- a/lucene/contrib/facet/src/java/org/apache/lucene/facet/search/sampling/Sampler.java
+++ b/lucene/contrib/facet/src/java/org/apache/lucene/facet/search/sampling/Sampler.java
@@ -4,6 +4,7 @@ import java.io.IOException;
 
 import org.apache.lucene.index.IndexReader;
 
+import org.apache.lucene.facet.search.CategoryListIterator;
 import org.apache.lucene.facet.search.FacetArrays;
 import org.apache.lucene.facet.search.ScoredDocIDs;
 import org.apache.lucene.facet.search.aggregator.Aggregator;
@@ -203,8 +204,21 @@ public abstract class Sampler {
     public OverSampledFacetRequest(FacetRequest orig, int num) {
       super(orig.getCategoryPath(), num);
       this.orig = orig;
+      setDepth(orig.getDepth());
+      setNumLabel(orig.getNumLabel());
+      setResultMode(orig.getResultMode());
+      setSortBy(orig.getSortBy());
+      setSortOrder(orig.getSortOrder());
+    }
+    
+    @Override
+    public CategoryListIterator createCategoryListIterator(IndexReader reader,
+        TaxonomyReader taxo, FacetSearchParams sParams, int partition)
+        throws IOException {
+      return orig.createCategoryListIterator(reader, taxo, sParams, partition);
     }
 
+    
     @Override
     public Aggregator createAggregator(boolean useComplements,
         FacetArrays arrays, IndexReader indexReader,
@@ -222,5 +236,10 @@ public abstract class Sampler {
     public boolean requireDocumentScore() {
       return orig.requireDocumentScore();
     }
+    
+    @Override
+    public boolean supportsComplements() {
+      return orig.supportsComplements();
+    }
   }
 }
diff --git a/lucene/contrib/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyWriter.java b/lucene/contrib/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyWriter.java
index cc86609..2211b50 100644
--- a/lucene/contrib/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyWriter.java
+++ b/lucene/contrib/facet/src/java/org/apache/lucene/facet/taxonomy/directory/DirectoryTaxonomyWriter.java
@@ -830,13 +830,7 @@ public class DirectoryTaxonomyWriter implements TaxonomyWriter {
         if (term.field() != Consts.FULL) break;
         cp.clear();
         cp.add(term.text(), Consts.DEFAULT_DELIMITER);
-        int ordinal = findCategory(cp);
-        if (ordinal < 0) {
-          // NOTE: call addCategory so that it works well in a multi-threaded
-          // environment, in case e.g. a thread just added the category, after
-          // the findCategory() call above failed to find it.
-          ordinal = addCategory(cp);
-        }
+        final int ordinal = addCategory(cp);
         docs.seek(term);
         docs.next();
         ordinalMap.addMapping(docs.doc(), ordinal);
diff --git a/lucene/contrib/facet/src/test/org/apache/lucene/facet/search/TestFacetsCollector.java b/lucene/contrib/facet/src/test/org/apache/lucene/facet/search/TestFacetsCollector.java
new file mode 100644
index 0000000..c7eb40a
--- /dev/null
+++ b/lucene/contrib/facet/src/test/org/apache/lucene/facet/search/TestFacetsCollector.java
@@ -0,0 +1,89 @@
+package org.apache.lucene.facet.search;
+
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.lucene.analysis.KeywordAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.facet.index.CategoryDocumentBuilder;
+import org.apache.lucene.facet.search.params.FacetSearchParams;
+import org.apache.lucene.facet.search.params.ScoreFacetRequest;
+import org.apache.lucene.facet.search.results.FacetResult;
+import org.apache.lucene.facet.taxonomy.CategoryPath;
+import org.apache.lucene.facet.taxonomy.TaxonomyWriter;
+import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyReader;
+import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.search.MultiCollector;
+import org.apache.lucene.search.TopScoreDocCollector;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.LuceneTestCase;
+import org.junit.Test;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class TestFacetsCollector extends LuceneTestCase {
+
+  @Test
+  public void testFacetsWithDocScore() throws Exception {
+    Directory indexDir = newDirectory();
+    Directory taxoDir = newDirectory();
+
+    TaxonomyWriter taxonomyWriter = new DirectoryTaxonomyWriter(taxoDir);
+    IndexWriter iw = new IndexWriter(indexDir, new IndexWriterConfig(
+        TEST_VERSION_CURRENT, new KeywordAnalyzer()));
+
+    CategoryDocumentBuilder cdb = new CategoryDocumentBuilder(taxonomyWriter);
+    Iterable<CategoryPath> cats = Arrays.asList(new CategoryPath("a"));
+    for(int i = atLeast(2000); i > 0; --i) {
+      Document doc = new Document();
+      doc.add(new Field("f", "v", Store.NO, Index.NOT_ANALYZED_NO_NORMS));
+      cdb.setCategoryPaths(cats);
+      iw.addDocument(cdb.build(doc));
+    }
+    
+    taxonomyWriter.close();
+    iw.close();
+    
+    FacetSearchParams sParams = new FacetSearchParams();
+    sParams.addFacetRequest(new ScoreFacetRequest(new CategoryPath("a"), 10));
+    
+    IndexReader r = IndexReader.open(indexDir);
+    DirectoryTaxonomyReader taxo = new DirectoryTaxonomyReader(taxoDir);
+    
+    FacetsCollector fc = new FacetsCollector(sParams, r, taxo);
+    TopScoreDocCollector topDocs = TopScoreDocCollector.create(10, false);
+    new IndexSearcher(r).search(new MatchAllDocsQuery(), MultiCollector.wrap(fc, topDocs));
+    
+    List<FacetResult> res = fc.getFacetResults();
+    double value = res.get(0).getFacetResultNode().getValue();
+    double expected = topDocs.topDocs().getMaxScore() * r.numDocs();
+    assertEquals(expected, value, 1E-10);
+    
+    IOUtils.close(taxo, taxoDir, r, indexDir);
+  }
+  
+}
diff --git a/lucene/contrib/facet/src/test/org/apache/lucene/facet/search/sampling/OversampleWithDepthTest.java b/lucene/contrib/facet/src/test/org/apache/lucene/facet/search/sampling/OversampleWithDepthTest.java
new file mode 100644
index 0000000..5acb89e
--- /dev/null
+++ b/lucene/contrib/facet/src/test/org/apache/lucene/facet/search/sampling/OversampleWithDepthTest.java
@@ -0,0 +1,137 @@
+package org.apache.lucene.facet.search.sampling;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+import org.apache.lucene.analysis.KeywordAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.facet.index.CategoryDocumentBuilder;
+import org.apache.lucene.facet.search.FacetsAccumulator;
+import org.apache.lucene.facet.search.FacetsCollector;
+import org.apache.lucene.facet.search.params.CountFacetRequest;
+import org.apache.lucene.facet.search.params.FacetRequest;
+import org.apache.lucene.facet.search.params.FacetRequest.ResultMode;
+import org.apache.lucene.facet.search.params.FacetSearchParams;
+import org.apache.lucene.facet.search.results.FacetResult;
+import org.apache.lucene.facet.search.results.FacetResultNode;
+import org.apache.lucene.facet.taxonomy.CategoryPath;
+import org.apache.lucene.facet.taxonomy.TaxonomyReader;
+import org.apache.lucene.facet.taxonomy.TaxonomyWriter;
+import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyReader;
+import org.apache.lucene.facet.taxonomy.directory.DirectoryTaxonomyWriter;
+import org.apache.lucene.index.CorruptIndexException;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.LockObtainFailedException;
+import org.apache.lucene.util.IOUtils;
+import org.apache.lucene.util.LuceneTestCase;
+import org.junit.Test;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class OversampleWithDepthTest extends LuceneTestCase {
+  
+  @Test
+  public void testCountWithdepthUsingSamping() throws Exception, IOException {
+    Directory indexDir = newDirectory();
+    Directory taxoDir = newDirectory();
+    
+    // index 100 docs, each with one category: ["root", docnum/10, docnum]
+    // e.g. root/8/87
+    index100Docs(indexDir, taxoDir);
+    
+    IndexReader r = IndexReader.open(indexDir);
+    TaxonomyReader tr = new DirectoryTaxonomyReader(taxoDir);
+    
+    FacetSearchParams fsp = new FacetSearchParams();
+    
+    CountFacetRequest facetRequest = new CountFacetRequest(new CategoryPath("root"), 10);
+    
+    // Setting the depth to '2' should potentially get all categories
+    facetRequest.setDepth(2);
+    facetRequest.setResultMode(ResultMode.PER_NODE_IN_TREE);
+    fsp.addFacetRequest(facetRequest);
+    
+    // Craft sampling params to enforce sampling
+    final SamplingParams params = new SamplingParams();
+    params.setMinSampleSize(2);
+    params.setMaxSampleSize(50);
+    params.setOversampleFactor(5);
+    params.setSampingThreshold(60);
+    params.setSampleRatio(0.1);
+    
+    FacetResult res = searchWithFacets(r, tr, fsp, params);
+    FacetRequest req = res.getFacetRequest();
+    assertEquals(facetRequest, req);
+    
+    FacetResultNode rootNode = res.getFacetResultNode();
+    
+    // Each node below root should also have sub-results as the requested depth was '2'
+    for (FacetResultNode node : rootNode.getSubResults()) {
+      assertTrue("node " + node.getLabel()
+          + " should have had children as the requested depth was '2'",
+          node.getNumSubResults() > 0);
+    }
+    
+    IOUtils.close(r, tr, indexDir, taxoDir);
+  }
+
+  private void index100Docs(Directory indexDir, Directory taxoDir)
+      throws CorruptIndexException, LockObtainFailedException, IOException {
+    IndexWriterConfig iwc = newIndexWriterConfig(TEST_VERSION_CURRENT, new KeywordAnalyzer());
+    IndexWriter w = new IndexWriter(indexDir, iwc);
+    TaxonomyWriter tw = new DirectoryTaxonomyWriter(taxoDir);
+    
+    CategoryDocumentBuilder cdb = new CategoryDocumentBuilder(tw);
+    ArrayList<CategoryPath> categoryPaths = new ArrayList<CategoryPath>(1);
+    
+    for (int i = 0; i < 100; i++) {
+      categoryPaths.clear();
+      categoryPaths.add(new CategoryPath("root",Integer.toString(i / 10), Integer.toString(i)));
+      cdb.setCategoryPaths(categoryPaths);
+      w.addDocument(cdb.build(new Document()));
+    }
+    IOUtils.close(tw, w);
+  }
+
+  /** search reader <code>r</code>*/
+  private FacetResult searchWithFacets(IndexReader r, TaxonomyReader tr,
+      FacetSearchParams fsp, final SamplingParams params) throws IOException {
+    // a FacetsCollector with a sampling accumulator
+    FacetsCollector fcWithSampling = new FacetsCollector(fsp, r, tr) {
+      @Override
+      protected FacetsAccumulator initFacetsAccumulator(FacetSearchParams facetSearchParams, IndexReader indexReader,
+          TaxonomyReader taxonomyReader) {
+        Sampler sampler = new RandomSampler(params, random);
+        return new SamplingAccumulator(sampler, facetSearchParams, indexReader, taxonomyReader);
+      }
+    };
+    
+    IndexSearcher s = new IndexSearcher(r);
+    s.search(new MatchAllDocsQuery(), fcWithSampling);
+    s.close();
+    
+    // there's only one expected result, return just it.
+    return fcWithSampling.getFacetResults().get(0);
+  }
+  
+}
diff --git a/lucene/contrib/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestAddTaxonomy.java b/lucene/contrib/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestAddTaxonomy.java
index f123e89..2c9c0fb 100644
--- a/lucene/contrib/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestAddTaxonomy.java
+++ b/lucene/contrib/facet/src/test/org/apache/lucene/facet/taxonomy/directory/TestAddTaxonomy.java
@@ -173,7 +173,7 @@ public class TestAddTaxonomy extends LuceneTestCase {
 
   public void testConcurrency() throws Exception {
     // tests that addTaxonomy and addCategory work in parallel
-    final int numCategories = atLeast(5000);
+    final int numCategories = atLeast(10000);
     
     // build an input taxonomy index
     Directory src = newDirectory();
diff --git a/lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermExtractor.java b/lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermExtractor.java
index ddc8560..52bb490 100644
--- a/lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermExtractor.java
+++ b/lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/QueryTermExtractor.java
@@ -61,18 +61,14 @@ public final class QueryTermExtractor
 	public static final WeightedTerm[] getIdfWeightedTerms(Query query, IndexReader reader, String fieldName) 
 	{
 	    WeightedTerm[] terms=getTerms(query,false, fieldName);
-	    int totalNumDocs=reader.numDocs();
+	    int totalNumDocs=reader.maxDoc();
 	    for (int i = 0; i < terms.length; i++)
         {
 	        try
             {
                 int docFreq=reader.docFreq(new Term(fieldName,terms[i].term));
-                // docFreq counts deletes
-                if(totalNumDocs < docFreq) {
-                  docFreq = totalNumDocs;
-                }
                 //IDF algorithm taken from DefaultSimilarity class
-                float idf=(float)(Math.log((float)totalNumDocs/(double)(docFreq+1)) + 1.0);
+                float idf=(float)(Math.log(totalNumDocs/(double)(docFreq+1)) + 1.0);
                 terms[i].weight*=idf;
             } 
 	        catch (IOException e)
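
The numDocs()-to-maxDoc() switch above fixes an idf inconsistency
(LUCENE-4289): docFreq still counts deleted documents, so numDocs() could be
smaller than docFreq and had to be clamped. With maxDoc() as the denominator
the clamp is unnecessary. A worked example with illustrative numbers:

    // maxDoc = 100, docFreq = 10 (DefaultSimilarity-style idf):
    float idf = (float) (Math.log(100 / (double) (10 + 1)) + 1.0);
    // = ln(100/11) + 1 ~= 3.21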
diff --git a/lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTermExtractor.java b/lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTermExtractor.java
index b212782..fb0d480 100644
--- a/lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTermExtractor.java
+++ b/lucene/contrib/highlighter/src/java/org/apache/lucene/search/highlight/WeightedSpanTermExtractor.java
@@ -425,7 +425,7 @@ public class WeightedSpanTermExtractor {
     Map<String,WeightedSpanTerm> terms = new PositionCheckingMap<String>();
     extract(query, terms);
 
-    int totalNumDocs = reader.numDocs();
+    int totalNumDocs = reader.maxDoc();
     Set<String> weightedTerms = terms.keySet();
     Iterator<String> it = weightedTerms.iterator();
 
@@ -433,12 +433,8 @@ public class WeightedSpanTermExtractor {
       while (it.hasNext()) {
         WeightedSpanTerm weightedSpanTerm = terms.get(it.next());
         int docFreq = reader.docFreq(new Term(fieldName, weightedSpanTerm.term));
-        // docFreq counts deletes
-        if(totalNumDocs < docFreq) {
-          docFreq = totalNumDocs;
-        }
         // IDF algorithm taken from DefaultSimilarity class
-        float idf = (float) (Math.log((float) totalNumDocs / (double) (docFreq + 1)) + 1.0);
+        float idf = (float) (Math.log(totalNumDocs / (double) (docFreq + 1)) + 1.0);
         weightedSpanTerm.weight *= idf;
       }
     } finally {
diff --git a/lucene/contrib/misc/src/java/org/apache/lucene/index/BalancedSegmentMergePolicy.java b/lucene/contrib/misc/src/java/org/apache/lucene/index/BalancedSegmentMergePolicy.java
index 5a23dab..09820a1 100644
--- a/lucene/contrib/misc/src/java/org/apache/lucene/index/BalancedSegmentMergePolicy.java
+++ b/lucene/contrib/misc/src/java/org/apache/lucene/index/BalancedSegmentMergePolicy.java
@@ -22,6 +22,9 @@ import java.io.IOException;
 import java.util.Collections;
 import java.util.Map;
 
+import org.apache.lucene.index.SerialMergeScheduler; // javadocs
+import org.apache.lucene.index.TieredMergePolicy; // javadocs
+
 /**
  * Merge policy that tries to balance not doing large
  * segment merges with not accumulating too many segments in
@@ -30,7 +33,19 @@ import java.util.Map;
  *
  * <p>This is based on code from zoie, described in more detail
  * at http://code.google.com/p/zoie/wiki/ZoieMergePolicy.</p>
+ *
+ * <p><b>WARNING</b>: there is a known bug in this merge policy
+ * that causes it to run forever, merging the same single
+ * segment over and over.  If you use {@link
+ * SerialMergeScheduler} this can cause an index thread to
+ * hang forever merging.  See <a
+ * href="https://issues.apache.org/jira/browse/LUCENE-4269">LUCENE-4269</a>
+ * for details.</p>
+ *
+ * @deprecated This class is removed in 4.0; use {@link
+ * TieredMergePolicy} instead.
  */
+@Deprecated
 public class BalancedSegmentMergePolicy extends LogByteSizeMergePolicy {
   
   public static final int DEFAULT_NUM_LARGE_SEGMENTS = 10;
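
A minimal migration sketch for the deprecation above (assumed usage, not part
of the patch; `analyzer` is a placeholder):

    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.TieredMergePolicy;
    import org.apache.lucene.util.Version;

    IndexWriterConfig iwc = new IndexWriterConfig(Version.LUCENE_36, analyzer);
    iwc.setMergePolicy(new TieredMergePolicy()); // replaces BalancedSegmentMergePolicy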
diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/precedence/processors/PrecedenceQueryNodeProcessorPipeline.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/precedence/processors/PrecedenceQueryNodeProcessorPipeline.java
index 724c150..ff0a213 100644
--- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/precedence/processors/PrecedenceQueryNodeProcessorPipeline.java
+++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/precedence/processors/PrecedenceQueryNodeProcessorPipeline.java
@@ -19,6 +19,7 @@ package org.apache.lucene.queryParser.precedence.processors;
 
 import org.apache.lucene.queryParser.core.config.QueryConfigHandler;
 import org.apache.lucene.queryParser.precedence.PrecedenceQueryParser;
+import org.apache.lucene.queryParser.standard.processors.BooleanQuery2ModifierNodeProcessor;
 import org.apache.lucene.queryParser.standard.processors.GroupQueryNodeProcessor;
 import org.apache.lucene.queryParser.standard.processors.StandardQueryNodeProcessorPipeline;
 
@@ -46,7 +47,7 @@ public class PrecedenceQueryNodeProcessorPipeline extends StandardQueryNodeProce
     
     for (int i = 0 ; i < size() ; i++) {
       
-      if (get(i).getClass().equals(GroupQueryNodeProcessor.class)) {
+      if (get(i).getClass().equals(BooleanQuery2ModifierNodeProcessor.class)) {
         remove(i--);
       }
       
diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/processors/BooleanQuery2ModifierNodeProcessor.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/processors/BooleanQuery2ModifierNodeProcessor.java
new file mode 100644
index 0000000..a0df9e9
--- /dev/null
+++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/processors/BooleanQuery2ModifierNodeProcessor.java
@@ -0,0 +1,202 @@
+package org.apache.lucene.queryParser.standard.processors;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.lucene.queryParser.core.QueryNodeException;
+import org.apache.lucene.queryParser.core.config.QueryConfigHandler;
+import org.apache.lucene.queryParser.core.nodes.AndQueryNode;
+import org.apache.lucene.queryParser.core.nodes.BooleanQueryNode;
+import org.apache.lucene.queryParser.core.nodes.ModifierQueryNode;
+import org.apache.lucene.queryParser.core.nodes.ModifierQueryNode.Modifier;
+import org.apache.lucene.queryParser.core.nodes.QueryNode;
+import org.apache.lucene.queryParser.core.processors.QueryNodeProcessor;
+import org.apache.lucene.queryParser.precedence.processors.BooleanModifiersQueryNodeProcessor;
+import org.apache.lucene.queryParser.standard.config.StandardQueryConfigHandler;
+import org.apache.lucene.queryParser.standard.config.StandardQueryConfigHandler.ConfigurationKeys;
+import org.apache.lucene.queryParser.standard.config.StandardQueryConfigHandler.Operator;
+import org.apache.lucene.queryParser.standard.nodes.BooleanModifierNode;
+import org.apache.lucene.queryParser.standard.parser.StandardSyntaxParser;
+
+/**
+ * <p>
+ * This processor is used to apply the correct {@link ModifierQueryNode} to
+ * {@link BooleanQueryNode}s children. This is a variant of
+ * {@link BooleanModifiersQueryNodeProcessor} which ignores precedence.
+ * </p>
+ * <p>
+ * The {@link StandardSyntaxParser} knows the rules of precedence, but Lucene
+ * does not. E.g. <code>(A AND B OR C AND D)</code> is treated like
+ * <code>(+A +B +C +D)</code>.
+ * </p>
+ * <p>
+ * This processor walks through the query node tree looking for
+ * {@link BooleanQueryNode}s. If an {@link AndQueryNode} is found, every child,
+ * which is not a {@link ModifierQueryNode} or the {@link ModifierQueryNode} is
+ * {@link Modifier#MOD_NONE}, becomes a {@link Modifier#MOD_REQ}. For default
+ * {@link BooleanQueryNode}, it checks whether the default operator is
+ * {@link Operator#AND}; if it is, the same operation performed for an
+ * {@link AndQueryNode} is applied to it. Each {@link BooleanQueryNode}
+ * whose direct parent is also a {@link BooleanQueryNode} is removed (to
+ * ignore the rules of precedence).
+ * </p>
+ * 
+ * @see ConfigurationKeys#DEFAULT_OPERATOR
+ * @see BooleanModifiersQueryNodeProcessor
+ */
+public class BooleanQuery2ModifierNodeProcessor implements QueryNodeProcessor {
+  final static String TAG_REMOVE = "remove";
+  final static String TAG_MODIFIER = "wrapWithModifier";
+  final static String TAG_BOOLEAN_ROOT = "booleanRoot";
+  
+  QueryConfigHandler queryConfigHandler;
+  
+  private final ArrayList<QueryNode> childrenBuffer = new ArrayList<QueryNode>();
+  
+  private Boolean usingAnd = false;
+  
+  public BooleanQuery2ModifierNodeProcessor() {
+    // empty constructor
+  }
+  
+  //@Override
+  public QueryNode process(QueryNode queryTree) throws QueryNodeException {
+    Operator op = getQueryConfigHandler().get(
+        ConfigurationKeys.DEFAULT_OPERATOR);
+    
+    if (op == null) {
+      throw new IllegalArgumentException(
+          "StandardQueryConfigHandler.ConfigurationKeys.DEFAULT_OPERATOR should be set on the QueryConfigHandler");
+    }
+    
+    this.usingAnd = StandardQueryConfigHandler.Operator.AND == op;
+    
+    return processIteration(queryTree);
+    
+  }
+  
+  protected void processChildren(QueryNode queryTree) throws QueryNodeException {
+    List<QueryNode> children = queryTree.getChildren();
+    if (children != null && children.size() > 0) {
+      for (QueryNode child : children) {
+        child = processIteration(child);
+      }
+    }
+  }
+  
+  private QueryNode processIteration(QueryNode queryTree)
+      throws QueryNodeException {
+    queryTree = preProcessNode(queryTree);
+    
+    processChildren(queryTree);
+    
+    queryTree = postProcessNode(queryTree);
+    
+    return queryTree;
+    
+  }
+  
+  protected void fillChildrenBufferAndApplyModifiery(QueryNode parent) {
+    for (QueryNode node : parent.getChildren()) {
+      if (node.containsTag(TAG_REMOVE)) {
+        fillChildrenBufferAndApplyModifiery(node);
+      } else if (node.containsTag(TAG_MODIFIER)) {
+        childrenBuffer.add(applyModifier(node,
+            (Modifier) node.getTag(TAG_MODIFIER)));
+      } else {
+        childrenBuffer.add(node);
+      }
+    }
+  }
+  
+  protected QueryNode postProcessNode(QueryNode node) throws QueryNodeException {
+    if (node.containsTag(TAG_BOOLEAN_ROOT)) {
+      this.childrenBuffer.clear();
+      fillChildrenBufferAndApplyModifiery(node);
+      node.set(childrenBuffer);
+    }
+    return node;
+    
+  }
+  
+  protected QueryNode preProcessNode(QueryNode node) throws QueryNodeException {
+    QueryNode parent = node.getParent();
+    if (node instanceof BooleanQueryNode) {
+      if (parent instanceof BooleanQueryNode) {
+        node.setTag(TAG_REMOVE, Boolean.TRUE); // no precedence
+      } else {
+        node.setTag(TAG_BOOLEAN_ROOT, Boolean.TRUE);
+      }
+    } else if (parent instanceof BooleanQueryNode) {
+      if ((parent instanceof AndQueryNode)
+          || (usingAnd && isDefaultBooleanQueryNode(parent))) {
+        tagModifierButDoNotOverride(node, ModifierQueryNode.Modifier.MOD_REQ);
+      }
+    }
+    return node;
+  }
+  
+  protected boolean isDefaultBooleanQueryNode(QueryNode toTest) {
+    return toTest != null && BooleanQueryNode.class.equals(toTest.getClass());
+  }
+  
+  private QueryNode applyModifier(QueryNode node, Modifier mod) {
+    
+    // check if modifier is not already defined and is default
+    if (!(node instanceof ModifierQueryNode)) {
+      return new BooleanModifierNode(node, mod);
+      
+    } else {
+      ModifierQueryNode modNode = (ModifierQueryNode) node;
+      
+      if (modNode.getModifier() == Modifier.MOD_NONE) {
+        return new ModifierQueryNode(modNode.getChild(), mod);
+      }
+      
+    }
+    
+    return node;
+    
+  }
+  
+  protected void tagModifierButDoNotOverride(QueryNode node, Modifier mod) {
+    if (node instanceof ModifierQueryNode) {
+      ModifierQueryNode modNode = (ModifierQueryNode) node;
+      if (modNode.getModifier() == Modifier.MOD_NONE) {
+        node.setTag(TAG_MODIFIER, mod);
+      }
+    } else {
+      node.setTag(TAG_MODIFIER, ModifierQueryNode.Modifier.MOD_REQ);
+    }
+  }
+  
+  //@Override
+  public void setQueryConfigHandler(QueryConfigHandler queryConfigHandler) {
+    this.queryConfigHandler = queryConfigHandler;
+    
+  }
+  
+  //@Override
+  public QueryConfigHandler getQueryConfigHandler() {
+    return queryConfigHandler;
+  }
+  
+}
+
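
A small usage sketch mirroring the javadoc above (the printed form is an
assumption based on the javadoc's own example): with this processor in the
pipeline, the flexible parser flattens un-parenthesized boolean precedence.

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.queryParser.standard.StandardQueryParser;
    import org.apache.lucene.util.Version;

    public class PrecedenceDemo {
      public static void main(String[] args) throws Exception {
        StandardQueryParser qp =
            new StandardQueryParser(new StandardAnalyzer(Version.LUCENE_36));
        // precedence is ignored: parsed like (+A +B +C +D)
        System.out.println(qp.parse("A AND B OR C AND D", "f"));
      }
    }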
diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/processors/GroupQueryNodeProcessor.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/processors/GroupQueryNodeProcessor.java
index 866cd4a..3156385 100644
--- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/processors/GroupQueryNodeProcessor.java
+++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/processors/GroupQueryNodeProcessor.java
@@ -49,6 +49,7 @@ import org.apache.lucene.queryParser.standard.nodes.BooleanModifierNode;
  * Example: TODO: describe a good example to show how this processor works
  * 
  * @see org.apache.lucene.queryParser.standard.config.StandardQueryConfigHandler
+ * @deprecated use {@link BooleanQuery2ModifierNodeProcessor} instead
  */
 public class GroupQueryNodeProcessor implements QueryNodeProcessor {
 
diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/processors/MultiFieldQueryNodeProcessor.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/processors/MultiFieldQueryNodeProcessor.java
index 4351cea..a0b2119 100644
--- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/processors/MultiFieldQueryNodeProcessor.java
+++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/processors/MultiFieldQueryNodeProcessor.java
@@ -25,6 +25,7 @@ import org.apache.lucene.queryParser.core.config.QueryConfigHandler;
 import org.apache.lucene.queryParser.core.nodes.BooleanQueryNode;
 import org.apache.lucene.queryParser.core.nodes.FieldableNode;
 import org.apache.lucene.queryParser.core.nodes.GroupQueryNode;
+import org.apache.lucene.queryParser.core.nodes.OrQueryNode;
 import org.apache.lucene.queryParser.core.nodes.QueryNode;
 import org.apache.lucene.queryParser.core.processors.QueryNodeProcessorImpl;
 import org.apache.lucene.queryParser.standard.config.StandardQueryConfigHandler.ConfigurationKeys;
@@ -108,7 +109,7 @@ public class MultiFieldQueryNodeProcessor extends QueryNodeProcessorImpl {
 
             }
 
-            return new GroupQueryNode(new BooleanQueryNode(children));
+            return new GroupQueryNode(new OrQueryNode(children));
 
           }
 
diff --git a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/processors/StandardQueryNodeProcessorPipeline.java b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/processors/StandardQueryNodeProcessorPipeline.java
index 7793433..97e23eb 100644
--- a/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/processors/StandardQueryNodeProcessorPipeline.java
+++ b/lucene/contrib/queryparser/src/java/org/apache/lucene/queryParser/standard/processors/StandardQueryNodeProcessorPipeline.java
@@ -59,7 +59,8 @@ public class StandardQueryNodeProcessorPipeline extends
     add(new AllowLeadingWildcardProcessor());    
     add(new AnalyzerQueryNodeProcessor());
     add(new PhraseSlopQueryNodeProcessor());
-    add(new GroupQueryNodeProcessor());
+    //add(new GroupQueryNodeProcessor());
+    add(new BooleanQuery2ModifierNodeProcessor());
     add(new NoChildOptimizationQueryNodeProcessor());
     add(new RemoveDeletedQueryNodesProcessor());
     add(new RemoveEmptyNonLeafQueryNodeProcessor());
diff --git a/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java b/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java
index ec2450c..e1367f9 100644
--- a/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java
+++ b/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQPHelper.java
@@ -183,15 +183,15 @@ public class TestMultiFieldQPHelper extends LuceneTestCase {
 
     String[] queries2 = { "+one", "+two" };
     q = QueryParserUtil.parse(queries2, fields, new MockAnalyzer(random));
-    assertEquals("(+b:one) (+t:two)", q.toString());
+    assertEquals("b:one t:two", q.toString());
 
     String[] queries3 = { "one", "+two" };
     q = QueryParserUtil.parse(queries3, fields, new MockAnalyzer(random));
-    assertEquals("b:one (+t:two)", q.toString());
+    assertEquals("b:one t:two", q.toString());
 
     String[] queries4 = { "one +more", "+two" };
     q = QueryParserUtil.parse(queries4, fields, new MockAnalyzer(random));
-    assertEquals("(b:one +b:more) (+t:two)", q.toString());
+    assertEquals("(b:one +b:more) t:two", q.toString());
 
     String[] queries5 = { "blah" };
     try {
diff --git a/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQueryParserWrapper.java b/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQueryParserWrapper.java
index c808b12..229a903 100644
--- a/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQueryParserWrapper.java
+++ b/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestMultiFieldQueryParserWrapper.java
@@ -184,17 +184,17 @@ public class TestMultiFieldQueryParserWrapper extends LuceneTestCase {
     String[] queries2 = { "+one", "+two" };
     q = MultiFieldQueryParserWrapper.parse(queries2, fields,
         new StandardAnalyzer(TEST_VERSION_CURRENT));
-    assertEquals("(+b:one) (+t:two)", q.toString());
+    assertEquals("b:one t:two", q.toString());
 
     String[] queries3 = { "one", "+two" };
     q = MultiFieldQueryParserWrapper.parse(queries3, fields,
         new StandardAnalyzer(TEST_VERSION_CURRENT));
-    assertEquals("b:one (+t:two)", q.toString());
+    assertEquals("b:one t:two", q.toString());
 
     String[] queries4 = { "one +more", "+two" };
     q = MultiFieldQueryParserWrapper.parse(queries4, fields,
         new StandardAnalyzer(TEST_VERSION_CURRENT));
-    assertEquals("(b:one +b:more) (+t:two)", q.toString());
+    assertEquals("(b:one +b:more) t:two", q.toString());
 
     String[] queries5 = { "blah" };
     try {
diff --git a/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java b/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java
index a9249da..b642f5c 100644
--- a/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java
+++ b/lucene/contrib/queryparser/src/test/org/apache/lucene/queryParser/standard/TestQPHelper.java
@@ -63,6 +63,7 @@ import org.apache.lucene.queryParser.core.processors.QueryNodeProcessorPipeline;
 import org.apache.lucene.queryParser.standard.config.StandardQueryConfigHandler;
 import org.apache.lucene.queryParser.standard.config.StandardQueryConfigHandler.Operator;
 import org.apache.lucene.queryParser.standard.nodes.WildcardQueryNode;
+import org.apache.lucene.queryParser.standard.StandardQueryParser;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.FuzzyQuery;
@@ -468,6 +469,27 @@ public class TestQPHelper extends LuceneTestCase {
     assertQueryEquals("a&&b", a, "a&&b");
     assertQueryEquals(".NET", a, ".NET");
   }
+  
+  public void testGroup() throws Exception {
+    assertQueryEquals("!(a AND b) OR c", null, "-(+a +b) c");
+    assertQueryEquals("!(a AND b) AND c", null, "-(+a +b) +c");
+    assertQueryEquals("((a AND b) AND c)", null, "+(+a +b) +c");
+    assertQueryEquals("(a AND b) AND c", null, "+(+a +b) +c");
+    assertQueryEquals("b !(a AND b)", null, "b -(+a +b)");
+    assertQueryEquals("(a AND b)^4 OR c", null, "((+a +b)^4.0) c");
+  }
+  
+  public void testParens() throws Exception {
+    StandardQueryParser qp = new StandardQueryParser(new MockAnalyzer(random));
+    String query = "(field:[1 TO *] AND field:[* TO 2]) AND field2:(z)";
+    BooleanQuery q = new BooleanQuery();
+    BooleanQuery bq = new BooleanQuery();
+    bq.add(new TermRangeQuery("field", "1", "*", true, true), BooleanClause.Occur.MUST);
+    bq.add(new TermRangeQuery("field", "*", "2", true, true), BooleanClause.Occur.MUST);
+    q.add(bq, BooleanClause.Occur.MUST);
+    q.add(new TermQuery(new Term("field2", "z")), BooleanClause.Occur.MUST);
+    assertEquals(q, qp.parse(query, "foo"));
+  }
 
   public void testSlop() throws Exception {
 
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocFieldConsumerPerField.java b/lucene/core/src/java/org/apache/lucene/index/DocFieldConsumerPerField.java
index f70e815..6c4eaad 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocFieldConsumerPerField.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocFieldConsumerPerField.java
@@ -24,4 +24,5 @@ abstract class DocFieldConsumerPerField {
   /** Processes all occurrences of a single field */
   abstract void processFields(Fieldable[] fields, int count) throws IOException;
   abstract void abort();
+  abstract void close();
 }
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocFieldProcessor.java b/lucene/core/src/java/org/apache/lucene/index/DocFieldProcessor.java
index 857b3fb..c55a0a5 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocFieldProcessor.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocFieldProcessor.java
@@ -50,7 +50,7 @@ final class DocFieldProcessor extends DocConsumer {
   public void flush(Collection<DocConsumerPerThread> threads, SegmentWriteState state) throws IOException {
 
     Map<DocFieldConsumerPerThread, Collection<DocFieldConsumerPerField>> childThreadsAndFields = new HashMap<DocFieldConsumerPerThread, Collection<DocFieldConsumerPerField>>();
-    for ( DocConsumerPerThread thread : threads) {
+    for (DocConsumerPerThread thread : threads) {
       DocFieldProcessorPerThread perThread = (DocFieldProcessorPerThread) thread;
       childThreadsAndFields.put(perThread.consumer, perThread.fields());
       perThread.trimFields(state);
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java b/lucene/core/src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java
index c21ff0e..8437eea 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocFieldProcessorPerThread.java
@@ -146,6 +146,8 @@ final class DocFieldProcessorPerThread extends DocConsumerPerThread {
           if (state.infoStream != null)
             state.infoStream.println("  purge field=" + perField.fieldInfo.name);
 
+          perField.consumer.close();
+
           totalFieldCount--;
 
         } else {
diff --git a/lucene/core/src/java/org/apache/lucene/index/DocInverterPerField.java b/lucene/core/src/java/org/apache/lucene/index/DocInverterPerField.java
index 00fbf29..adc6414 100644
--- a/lucene/core/src/java/org/apache/lucene/index/DocInverterPerField.java
+++ b/lucene/core/src/java/org/apache/lucene/index/DocInverterPerField.java
@@ -61,6 +61,11 @@ final class DocInverterPerField extends DocFieldConsumerPerField {
   }
 
   @Override
+  public void close() {
+    consumer.close();
+  }
+
+  @Override
   public void processFields(final Fieldable[] fields,
                             final int count) throws IOException {
 
diff --git a/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java b/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java
index 7ba4f29..1204e17 100644
--- a/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java
+++ b/lucene/core/src/java/org/apache/lucene/index/FreqProxTermsWriterPerField.java
@@ -48,7 +48,7 @@ final class FreqProxTermsWriterPerField extends TermsHashConsumerPerField implem
 
   @Override
   int getStreamCount() {
-    if (fieldInfo.indexOptions != IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)
+    if (indexOptions != IndexOptions.DOCS_AND_FREQS_AND_POSITIONS)
       return 1;
     else
       return 2;
diff --git a/lucene/core/src/java/org/apache/lucene/index/InvertedDocConsumerPerField.java b/lucene/core/src/java/org/apache/lucene/index/InvertedDocConsumerPerField.java
index cb7a333..de249b5 100644
--- a/lucene/core/src/java/org/apache/lucene/index/InvertedDocConsumerPerField.java
+++ b/lucene/core/src/java/org/apache/lucene/index/InvertedDocConsumerPerField.java
@@ -41,4 +41,6 @@ abstract class InvertedDocConsumerPerField {
 
   // Called on hitting an aborting exception
   abstract void abort();
+
+  abstract void close();
 }
diff --git a/lucene/core/src/java/org/apache/lucene/index/TermsHashPerField.java b/lucene/core/src/java/org/apache/lucene/index/TermsHashPerField.java
index e05ba80..72f57f3 100644
--- a/lucene/core/src/java/org/apache/lucene/index/TermsHashPerField.java
+++ b/lucene/core/src/java/org/apache/lucene/index/TermsHashPerField.java
@@ -79,6 +79,26 @@ final class TermsHashPerField extends InvertedDocConsumerPerField {
       nextPerField = null;
   }
 
+  @Override
+  public void close() {
+    if (perThread.termsHash.trackAllocations) {
+      try {
+        if (postingsHash != null) {
+          bytesUsed(-postingsHash.length * RamUsageEstimator.NUM_BYTES_INT);
+          postingsHash = null;
+        }
+        if (postingsArray != null) {
+          bytesUsed(-postingsArray.bytesPerPosting() * postingsArray.size);
+          postingsArray = null;
+        }
+      } finally {
+        if (nextPerField != null) {
+          nextPerField.close();
+        }
+      }
+    }
+  }
+
   private void initPostingsArray() {
     postingsArray = consumer.createPostingsArray(2);
     bytesUsed(postingsArray.size * postingsArray.bytesPerPosting());
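
The new close() above gives back the RAM that flush accounting charged for the
per-field postings buffers, and the try/finally guarantees the chained
nextPerField is released even if the first release throws. A minimal standalone
sketch of the same pattern (TrackedBuffers and its fields are illustrative
names, not the Lucene classes):

    class TrackedBuffers {
      private int[] postingsHash = new int[8];
      private TrackedBuffers nextPerField;   // may be null
      private long bytesUsed;                // stands in for the shared RAM counter

      void bytesUsed(long delta) { bytesUsed += delta; }

      void close() {
        try {
          if (postingsHash != null) {
            bytesUsed(-postingsHash.length * 4L);  // undo the 4 bytes charged per int
            postingsHash = null;
          }
        } finally {
          if (nextPerField != null) nextPerField.close();  // always release the chain
        }
      }
    }
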
diff --git a/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java b/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java
index dfb9917..e3cd604 100644
--- a/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/BooleanQuery.java
@@ -200,6 +200,13 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
       return sum ;
     }
 
+    float coord(int overlap, int maxOverlap) {
+      // LUCENE-4300: in most cases of maxOverlap=1, BQ rewrites itself away,
+      // so coord() is not applied. But when BQ cannot optimize itself away
+      // for a single clause (minNrShouldMatch, prohibited clauses, etc), its
+      // important not to apply coord(1,1) for consistency, it might not be 1.0F
+      return maxOverlap == 1 ? 1F : similarity.coord(overlap, maxOverlap);
+    }
 
     @Override
     public void normalize(float norm) {
@@ -272,7 +279,7 @@ public class BooleanQuery extends Query implements Iterable<BooleanClause> {
       sumExpl.setMatch(0 < coord ? Boolean.TRUE : Boolean.FALSE);
       sumExpl.setValue(sum);
       
-      final float coordFactor = disableCoord ? 1.0f : similarity.coord(coord, maxCoord);
+      final float coordFactor = disableCoord ? 1.0f : coord(coord, maxCoord);
       if (coordFactor == 1.0f) {
         return sumExpl;                             // eliminate wrapper
       } else {
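
The maxOverlap == 1 special case above only matters when a one-clause
BooleanQuery cannot rewrite itself away; testRewriteCoord1, added to
TestBooleanMinShouldMatch further down in this commit, pins that behaviour. A
condensed fragment of the scenario (Lucene 3.6 API; index and searcher setup
elided):

    BooleanQuery bq = new BooleanQuery();
    bq.add(new TermQuery(new Term("data", "1")), BooleanClause.Occur.SHOULD);
    bq.setMinimumNumberShouldMatch(1);  // blocks the rewrite to a bare TermQuery
    // Without the maxOverlap == 1 shortcut, a Similarity overriding coord() so
    // that coord(1, 1) != 1.0f would scale bq's scores differently from the
    // equivalent single TermQuery, which never consults coord() at all.
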
diff --git a/lucene/core/src/java/org/apache/lucene/search/BooleanScorer.java b/lucene/core/src/java/org/apache/lucene/search/BooleanScorer.java
index c8ded8f..1b579ab 100644
--- a/lucene/core/src/java/org/apache/lucene/search/BooleanScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/BooleanScorer.java
@@ -22,6 +22,7 @@ import java.util.List;
 
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.BooleanQuery.BooleanWeight;
 
 /* Description from Doug Cutting (excerpted from
  * LUCENE-1483):
@@ -203,7 +204,7 @@ final class BooleanScorer extends Scorer {
   // Any time a prohibited clause matches we set bit 0:
   private static final int PROHIBITED_MASK = 1;
   
-  BooleanScorer(Weight weight, boolean disableCoord, Similarity similarity, int minNrShouldMatch,
+  BooleanScorer(BooleanWeight weight, boolean disableCoord, Similarity similarity, int minNrShouldMatch,
       List<Scorer> optionalScorers, List<Scorer> prohibitedScorers, int maxCoord) throws IOException {
     super(weight);
     this.minNrShouldMatch = minNrShouldMatch;
@@ -226,7 +227,7 @@ final class BooleanScorer extends Scorer {
 
     coordFactors = new float[optionalScorers.size() + 1];
     for (int i = 0; i < coordFactors.length; i++) {
-      coordFactors[i] = disableCoord ? 1.0f : similarity.coord(i, maxCoord); 
+      coordFactors[i] = disableCoord ? 1.0f : weight.coord(i, maxCoord); 
     }
   }
 
diff --git a/lucene/core/src/java/org/apache/lucene/search/BooleanScorer2.java b/lucene/core/src/java/org/apache/lucene/search/BooleanScorer2.java
index 4302216..f6fb6f8 100644
--- a/lucene/core/src/java/org/apache/lucene/search/BooleanScorer2.java
+++ b/lucene/core/src/java/org/apache/lucene/search/BooleanScorer2.java
@@ -22,6 +22,7 @@ import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.BooleanQuery.BooleanWeight;
 
 /* See the description in BooleanScorer.java, comparing
  * BooleanScorer & BooleanScorer2 */
@@ -42,10 +43,10 @@ class BooleanScorer2 extends Scorer {
     int maxCoord = 0; // to be increased for each non prohibited scorer
     int nrMatchers; // to be increased by score() of match counting scorers.
     
-    void init(Similarity sim, boolean disableCoord) { // use after all scorers have been added.
+    void init(BooleanWeight weight, boolean disableCoord) { // use after all scorers have been added.
       coordFactors = new float[optionalScorers.size() + requiredScorers.size() + 1];
       for (int i = 0; i < coordFactors.length; i++) {
-        coordFactors[i] = disableCoord ? 1.0f : sim.coord(i, maxCoord);
+        coordFactors[i] = disableCoord ? 1.0f : weight.coord(i, maxCoord);
       }
     }
   }
@@ -83,7 +84,7 @@ class BooleanScorer2 extends Scorer {
    * @param optional
    *          the list of optional scorers.
    */
-  public BooleanScorer2(Weight weight, boolean disableCoord, Similarity similarity, int minNrShouldMatch,
+  public BooleanScorer2(BooleanWeight weight, boolean disableCoord, Similarity similarity, int minNrShouldMatch,
       List<Scorer> required, List<Scorer> prohibited, List<Scorer> optional, int maxCoord) throws IOException {
     super(weight);
     if (minNrShouldMatch < 0) {
@@ -97,7 +98,7 @@ class BooleanScorer2 extends Scorer {
     requiredScorers = required;    
     prohibitedScorers = prohibited;
     
-    coordinator.init(similarity, disableCoord);
+    coordinator.init(weight, disableCoord);
     countingSumScorer = makeCountingSumScorer(disableCoord, similarity);
   }
   
@@ -128,6 +129,11 @@ class BooleanScorer2 extends Scorer {
     }
 
     @Override
+    public float freq() throws IOException {
+      return 1;
+    }
+
+    @Override
     public int docID() {
       return scorer.docID();
     }
@@ -170,7 +176,7 @@ class BooleanScorer2 extends Scorer {
                                               List<Scorer> requiredScorers) throws IOException {
     // each scorer from the list counted as a single matcher
     final int requiredNrMatchers = requiredScorers.size();
-    return new ConjunctionScorer(weight, disableCoord ? 1.0f : similarity.coord(requiredScorers.size(), requiredScorers.size()), requiredScorers) {
+    return new ConjunctionScorer(weight, requiredScorers) {
       private int lastScoredDoc = -1;
       // Save the score of lastScoredDoc, so that we don't compute it more than
       // once in score().
@@ -196,7 +202,7 @@ class BooleanScorer2 extends Scorer {
   private Scorer dualConjunctionSumScorer(boolean disableCoord,
                                           Similarity similarity,
                                           Scorer req1, Scorer req2) throws IOException { // non counting.
-    return new ConjunctionScorer(weight, disableCoord ? 1.0f : similarity.coord(2, 2), req1, req2);
+    return new ConjunctionScorer(weight, req1, req2);
     // All scorers match, so defaultSimilarity always has 1 as
     // the coordination factor.
     // Therefore the sum of the scores of two scorers
@@ -311,8 +317,8 @@ class BooleanScorer2 extends Scorer {
   }
 
   @Override
-  public float freq() {
-    return coordinator.nrMatchers;
+  public float freq() throws IOException {
+    return countingSumScorer.freq();
   }
 
   @Override
diff --git a/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java b/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java
index b8dea25..4146cbf 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ConjunctionScorer.java
@@ -26,17 +26,15 @@ import java.util.Comparator;
 class ConjunctionScorer extends Scorer {
   
   private final Scorer[] scorers;
-  private final float coord;
   private int lastDoc = -1;
 
-  public ConjunctionScorer(Weight weight, float coord, Collection<Scorer> scorers) throws IOException {
-    this(weight, coord, scorers.toArray(new Scorer[scorers.size()]));
+  public ConjunctionScorer(Weight weight, Collection<Scorer> scorers) throws IOException {
+    this(weight, scorers.toArray(new Scorer[scorers.size()]));
   }
 
-  public ConjunctionScorer(Weight weight, float coord, Scorer... scorers) throws IOException {
+  public ConjunctionScorer(Weight weight, Scorer... scorers) throws IOException {
     super(weight);
     this.scorers = scorers;
-    this.coord = coord;
     
     for (int i = 0; i < scorers.length; i++) {
       if (scorers[i].nextDoc() == NO_MORE_DOCS) {
@@ -134,6 +132,11 @@ class ConjunctionScorer extends Scorer {
     for (int i = 0; i < scorers.length; i++) {
       sum += scorers[i].score();
     }
-    return sum * coord;
+    return sum;
+  }
+
+  @Override
+  public float freq() throws IOException {
+    return scorers.length;
   }
 }
diff --git a/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java b/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java
index bcb372e..6532b61 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ConstantScoreQuery.java
@@ -198,6 +198,7 @@ public class ConstantScoreQuery extends Query {
 
     @Override
     public float score() throws IOException {
+      assert docIdSetIterator.docID() != NO_MORE_DOCS;
       return theScore;
     }
 
diff --git a/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java b/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java
index 96f896c..767f0e6 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxQuery.java
@@ -147,7 +147,7 @@ public class DisjunctionMaxQuery extends Query implements Iterable<Query> {
       int idx = 0;
       for (Weight w : weights) {
         Scorer subScorer = w.scorer(reader, true, false);
-        if (subScorer != null && subScorer.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
+        if (subScorer != null) {
           scorers[idx++] = subScorer;
         }
       }
diff --git a/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxScorer.java b/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxScorer.java
index 2028fce..0f177c3 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DisjunctionMaxScorer.java
@@ -24,11 +24,7 @@ import java.io.IOException;
  * by the subquery scorers that generate that document, plus tieBreakerMultiplier times the sum of the scores
  * for the other subqueries that generate the document.
  */
-class DisjunctionMaxScorer extends Scorer {
-
-  /* The scorers for subqueries that have remaining docs, kept as a min heap by number of next doc. */
-  private final Scorer[] subScorers;
-  private int numScorers;
+class DisjunctionMaxScorer extends DisjunctionScorer {
   /* Multiplier applied to non-maximum-scoring subqueries for a document as they are summed into the result. */
   private final float tieBreakerMultiplier;
   private int doc = -1;
@@ -55,16 +51,9 @@ class DisjunctionMaxScorer extends Scorer {
    *          length may be larger than the actual number of scorers.
    */
   public DisjunctionMaxScorer(Weight weight, float tieBreakerMultiplier,
-      Similarity similarity, Scorer[] subScorers, int numScorers) throws IOException {
-    super(similarity, weight);
+      Similarity similarity, Scorer[] subScorers, int numScorers) {
+    super(similarity, weight, subScorers, numScorers);
     this.tieBreakerMultiplier = tieBreakerMultiplier;
-    // The passed subScorers array includes only scorers which have documents
-    // (DisjunctionMaxQuery takes care of that), and their nextDoc() was already
-    // called.
-    this.subScorers = subScorers;
-    this.numScorers = numScorers;
-    
-    heapify();
   }
 
   @Override
@@ -114,6 +103,24 @@ class DisjunctionMaxScorer extends Scorer {
   }
 
   @Override
+  public float freq() throws IOException {
+    int doc = subScorers[0].docID();
+    int size = numScorers;
+    return 1 + freq(1, size, doc) + freq(2, size, doc);
+  }
+  
+  // Recursively iterate all subScorers that generated last doc computing sum and max
+  private int freq(int root, int size, int doc) throws IOException {
+    int freq = 0;
+    if (root < size && subScorers[root].docID() == doc) {
+      freq++;
+      freq += freq((root<<1)+1, size, doc);
+      freq += freq((root<<1)+2, size, doc);
+    }
+    return freq;
+  }
+
+  @Override
   public int advance(int target) throws IOException {
     if (numScorers == 0) return doc = NO_MORE_DOCS;
     while (subScorers[0].docID() < target) {
@@ -129,63 +136,6 @@ class DisjunctionMaxScorer extends Scorer {
     return doc = subScorers[0].docID();
   }
 
-  // Organize subScorers into a min heap with scorers generating the earliest document on top.
-  private void heapify() {
-    for (int i = (numScorers >> 1) - 1; i >= 0; i--) {
-      heapAdjust(i);
-    }
-  }
-
-  /* The subtree of subScorers at root is a min heap except possibly for its root element.
-   * Bubble the root down as required to make the subtree a heap.
-   */
-  private void heapAdjust(int root) {
-    Scorer scorer = subScorers[root];
-    int doc = scorer.docID();
-    int i = root;
-    while (i <= (numScorers >> 1) - 1) {
-      int lchild = (i << 1) + 1;
-      Scorer lscorer = subScorers[lchild];
-      int ldoc = lscorer.docID();
-      int rdoc = Integer.MAX_VALUE, rchild = (i << 1) + 2;
-      Scorer rscorer = null;
-      if (rchild < numScorers) {
-        rscorer = subScorers[rchild];
-        rdoc = rscorer.docID();
-      }
-      if (ldoc < doc) {
-        if (rdoc < ldoc) {
-          subScorers[i] = rscorer;
-          subScorers[rchild] = scorer;
-          i = rchild;
-        } else {
-          subScorers[i] = lscorer;
-          subScorers[lchild] = scorer;
-          i = lchild;
-        }
-      } else if (rdoc < doc) {
-        subScorers[i] = rscorer;
-        subScorers[rchild] = scorer;
-        i = rchild;
-      } else {
-        return;
-      }
-    }
-  }
-
-  // Remove the root Scorer from subScorers and re-establish it as a heap
-  private void heapRemoveRoot() {
-    if (numScorers == 1) {
-      subScorers[0] = null;
-      numScorers = 0;
-    } else {
-      subScorers[0] = subScorers[numScorers - 1];
-      subScorers[numScorers - 1] = null;
-      --numScorers;
-      heapAdjust(0);
-    }
-  }
-
   @Override
   public void visitSubScorers(Query parent, BooleanClause.Occur relationship, ScorerVisitor<Query, Query, Scorer> visitor) {
     super.visitSubScorers(parent, relationship, visitor);

diff --git a/lucene/core/src/java/org/apache/lucene/search/DisjunctionScorer.java b/lucene/core/src/java/org/apache/lucene/search/DisjunctionScorer.java
new file mode 100644
index 0000000..c78d3e6
--- /dev/null
+++ b/lucene/core/src/java/org/apache/lucene/search/DisjunctionScorer.java
@@ -0,0 +1,96 @@
+package org.apache.lucene.search;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Base class for Scorers that score disjunctions.
+ * Currently this just provides helper methods to manage the heap.
+ */
+abstract class DisjunctionScorer extends Scorer {
+  protected final Scorer subScorers[];
+  protected int numScorers;
+  
+  protected DisjunctionScorer(Similarity similarity, Weight weight, Scorer subScorers[], int numScorers) {
+    super(similarity, weight);
+    this.subScorers = subScorers;
+    this.numScorers = numScorers;
+    heapify();
+  }
+  
+  /** 
+   * Organize subScorers into a min heap with scorers generating the earliest document on top.
+   */
+  protected final void heapify() {
+    for (int i = (numScorers >> 1) - 1; i >= 0; i--) {
+      heapAdjust(i);
+    }
+  }
+  
+  /** 
+   * The subtree of subScorers at root is a min heap except possibly for its root element.
+   * Bubble the root down as required to make the subtree a heap.
+   */
+  protected final void heapAdjust(int root) {
+    Scorer scorer = subScorers[root];
+    int doc = scorer.docID();
+    int i = root;
+    while (i <= (numScorers >> 1) - 1) {
+      int lchild = (i << 1) + 1;
+      Scorer lscorer = subScorers[lchild];
+      int ldoc = lscorer.docID();
+      int rdoc = Integer.MAX_VALUE, rchild = (i << 1) + 2;
+      Scorer rscorer = null;
+      if (rchild < numScorers) {
+        rscorer = subScorers[rchild];
+        rdoc = rscorer.docID();
+      }
+      if (ldoc < doc) {
+        if (rdoc < ldoc) {
+          subScorers[i] = rscorer;
+          subScorers[rchild] = scorer;
+          i = rchild;
+        } else {
+          subScorers[i] = lscorer;
+          subScorers[lchild] = scorer;
+          i = lchild;
+        }
+      } else if (rdoc < doc) {
+        subScorers[i] = rscorer;
+        subScorers[rchild] = scorer;
+        i = rchild;
+      } else {
+        return;
+      }
+    }
+  }
+
+  /** 
+   * Remove the root Scorer from subScorers and re-establish it as a heap
+   */
+  protected final void heapRemoveRoot() {
+    if (numScorers == 1) {
+      subScorers[0] = null;
+      numScorers = 0;
+    } else {
+      subScorers[0] = subScorers[numScorers - 1];
+      subScorers[numScorers - 1] = null;
+      --numScorers;
+      heapAdjust(0);
+    }
+  }
+}
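
The helpers above keep the sub-scorers in an array-backed binary min heap: node
i's children sit at 2*i+1 and 2*i+2, so every scorer positioned on the minimum
doc forms a subtree under the root. That invariant is what the freq() and
countMatches() recursions in DisjunctionMaxScorer and DisjunctionSumScorer rely
on when they visit each sub-scorer on the current doc. A standalone sketch with
plain ints in place of Scorers (HeapWalk is an illustrative name):

    public class HeapWalk {
      // count heap entries equal to doc; a min heap lets us prune any subtree
      // whose root differs, since descendants can only be >= their ancestor
      static int countEqual(int[] heap, int size, int root, int doc) {
        if (root >= size || heap[root] != doc) return 0;
        return 1 + countEqual(heap, size, 2 * root + 1, doc)
                 + countEqual(heap, size, 2 * root + 2, doc);
      }

      public static void main(String[] args) {
        int[] docIDs = {5, 5, 7, 5, 9};  // a valid min heap over current docIDs
        System.out.println(countEqual(docIDs, docIDs.length, 0, 5)); // prints 3
      }
    }
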
diff --git a/lucene/core/src/java/org/apache/lucene/search/DisjunctionSumScorer.java b/lucene/core/src/java/org/apache/lucene/search/DisjunctionSumScorer.java
index d29d050..f806ca7 100644
--- a/lucene/core/src/java/org/apache/lucene/search/DisjunctionSumScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/DisjunctionSumScorer.java
@@ -20,42 +20,20 @@ package org.apache.lucene.search;
 import java.util.List;
 import java.io.IOException;
 
-import org.apache.lucene.util.ScorerDocQueue;
-
 /** A Scorer for OR like queries, counterpart of <code>ConjunctionScorer</code>.
  * This Scorer implements {@link Scorer#skipTo(int)} and uses skipTo() on the given Scorers. 
  */
-class DisjunctionSumScorer extends Scorer {
-  /** The number of subscorers. */ 
-  private final int nrScorers;
-  
-  /** The subscorers. */
-  protected final List<Scorer> subScorers;
-  
+class DisjunctionSumScorer extends DisjunctionScorer { 
   /** The minimum number of scorers that should match. */
   private final int minimumNrMatchers;
   
-  /** The scorerDocQueue contains all subscorers ordered by their current doc(),
-   * with the minimum at the top.
-   * <br>The scorerDocQueue is initialized the first time next() or skipTo() is called.
-   * <br>An exhausted scorer is immediately removed from the scorerDocQueue.
-   * <br>If less than the minimumNrMatchers scorers
-   * remain in the scorerDocQueue next() and skipTo() return false.
-   * <p>
-   * After each to call to next() or skipTo()
-   * <code>currentSumScore</code> is the total score of the current matching doc,
-   * <code>nrMatchers</code> is the number of matching scorers,
-   * and all scorers are after the matching doc, or are exhausted.
-   */
-  private ScorerDocQueue scorerDocQueue;
-  
   /** The document number of the current match. */
-  private int currentDoc = -1;
+  private int doc = -1;
 
   /** The number of subscorers that provide the current match. */
   protected int nrMatchers = -1;
 
-  private double currentScore = Float.NaN;
+  private double score = Float.NaN;
   
   /** Construct a <code>DisjunctionScorer</code>.
    * @param weight The weight to be used.
@@ -69,21 +47,16 @@ class DisjunctionSumScorer extends Scorer {
    * it more efficient to use <code>ConjunctionScorer</code>.
    */
   public DisjunctionSumScorer(Weight weight, List<Scorer> subScorers, int minimumNrMatchers) throws IOException {
-    super(weight);
-    
-    nrScorers = subScorers.size();
+    super(null, weight, subScorers.toArray(new Scorer[subScorers.size()]), subScorers.size());
 
     if (minimumNrMatchers <= 0) {
       throw new IllegalArgumentException("Minimum nr of matchers must be positive");
     }
-    if (nrScorers <= 1) {
+    if (numScorers <= 1) {
       throw new IllegalArgumentException("There must be at least 2 subScorers");
     }
 
     this.minimumNrMatchers = minimumNrMatchers;
-    this.subScorers = subScorers;
-
-    initScorerDocQueue();
   }
   
   /** Construct a <code>DisjunctionScorer</code>, using one as the minimum number
@@ -93,123 +66,78 @@ class DisjunctionSumScorer extends Scorer {
     this(weight, subScorers, 1);
   }
 
-  /** Called the first time next() or skipTo() is called to
-   * initialize <code>scorerDocQueue</code>.
-   */
-  private void initScorerDocQueue() throws IOException {
-    scorerDocQueue = new ScorerDocQueue(nrScorers);
-    for (Scorer se : subScorers) {
-      if (se.nextDoc() != NO_MORE_DOCS) {
-        scorerDocQueue.insert(se);
-      }
-    }
-  }
-
-  /** Scores and collects all matching documents.
-   * @param collector The collector to which all matching documents are passed through.
-   */
-  @Override
-  public void score(Collector collector) throws IOException {
-    collector.setScorer(this);
-    while (nextDoc() != NO_MORE_DOCS) {
-      collector.collect(currentDoc);
-    }
-  }
-
-  /** Expert: Collects matching documents in a range.  Hook for optimization.
-   * Note that {@link #next()} must be called once before this method is called
-   * for the first time.
-   * @param collector The collector to which all matching documents are passed through.
-   * @param max Do not score documents past this.
-   * @return true if more matching documents may remain.
-   */
-  @Override
-  protected boolean score(Collector collector, int max, int firstDocID) throws IOException {
-    // firstDocID is ignored since nextDoc() sets 'currentDoc'
-    collector.setScorer(this);
-    while (currentDoc < max) {
-      collector.collect(currentDoc);
-      if (nextDoc() == NO_MORE_DOCS) {
-        return false;
-      }
-    }
-    return true;
-  }
-
   @Override
   public int nextDoc() throws IOException {
-    if (scorerDocQueue.size() < minimumNrMatchers || !advanceAfterCurrent()) {
-      currentDoc = NO_MORE_DOCS;
-    }
-    return currentDoc;
-  }
-
-  /** Advance all subscorers after the current document determined by the
-   * top of the <code>scorerDocQueue</code>.
-   * Repeat until at least the minimum number of subscorers match on the same
-   * document and all subscorers are after that document or are exhausted.
-   * <br>On entry the <code>scorerDocQueue</code> has at least <code>minimumNrMatchers</code>
-   * available. At least the scorer with the minimum document number will be advanced.
-   * @return true iff there is a match.
-   * <br>In case there is a match, </code>currentDoc</code>, </code>currentSumScore</code>,
-   * and </code>nrMatchers</code> describe the match.
-   *
-   * TODO: Investigate whether it is possible to use skipTo() when
-   * the minimum number of matchers is bigger than one, ie. try and use the
-   * character of ConjunctionScorer for the minimum number of matchers.
-   * Also delay calling score() on the sub scorers until the minimum number of
-   * matchers is reached.
-   * <br>For this, a Scorer array with minimumNrMatchers elements might
-   * hold Scorers at currentDoc that are temporarily popped from scorerQueue.
-   */
-  protected boolean advanceAfterCurrent() throws IOException {
-    do { // repeat until minimum nr of matchers
-      currentDoc = scorerDocQueue.topDoc();
-      currentScore = scorerDocQueue.topScore();
-      nrMatchers = 1;
-      do { // Until all subscorers are after currentDoc
-        if (!scorerDocQueue.topNextAndAdjustElsePop()) {
-          if (scorerDocQueue.size() == 0) {
-            break; // nothing more to advance, check for last match.
+    assert doc != NO_MORE_DOCS;
+    while(true) {
+      while (subScorers[0].docID() == doc) {
+        if (subScorers[0].nextDoc() != NO_MORE_DOCS) {
+          heapAdjust(0);
+        } else {
+          heapRemoveRoot();
+          if (numScorers < minimumNrMatchers) {
+            return doc = NO_MORE_DOCS;
           }
         }
-        if (scorerDocQueue.topDoc() != currentDoc) {
-          break; // All remaining subscorers are after currentDoc.
-        }
-        currentScore += scorerDocQueue.topScore();
-        nrMatchers++;
-      } while (true);
-      
+      }
+      afterNext();
       if (nrMatchers >= minimumNrMatchers) {
-        return true;
-      } else if (scorerDocQueue.size() < minimumNrMatchers) {
-        return false;
+        break;
       }
-    } while (true);
+    }
+    
+    return doc;
+  }
+  
+  private void afterNext() throws IOException {
+    final Scorer sub = subScorers[0];
+    doc = sub.docID();
+    if (doc == NO_MORE_DOCS) {
+      nrMatchers = Integer.MAX_VALUE; // stop looping
+    } else {
+      score = sub.score();
+      nrMatchers = 1;
+      countMatches(1);
+      countMatches(2);
+    }
+  }
+  
+  // TODO: this currently scores, but so did the previous impl
+  // TODO: remove recursion.
+  // TODO: if we separate scoring out of here, modify this
+  // and afterNext() to terminate when nrMatchers == minimumNrMatchers
+  // then also change freq() to just always compute it from scratch
+  private void countMatches(int root) throws IOException {
+    if (root < numScorers && subScorers[root].docID() == doc) {
+      nrMatchers++;
+      score += subScorers[root].score();
+      countMatches((root<<1)+1);
+      countMatches((root<<1)+2);
+    }
   }
   
   /** Returns the score of the current document matching the query.
    * Initially invalid, until {@link #nextDoc()} is called the first time.
    */
   @Override
-  public float score() throws IOException { return (float)currentScore; }
+  public float score() throws IOException { 
+    return (float)score; 
+  }
    
   @Override
   public int docID() {
-    return currentDoc;
+    return doc;
   }
-  
-  /** Returns the number of subscorers matching the current document.
-   * Initially invalid, until {@link #nextDoc()} is called the first time.
-   */
-  public int nrMatchers() {
+
+  @Override
+  public float freq() throws IOException {
     return nrMatchers;
   }
 
   /**
    * Advances to the first match beyond the current whose document number is
    * greater than or equal to a given target. <br>
-   * The implementation uses the skipTo() method on the subscorers.
+   * The implementation uses the advance() method on the subscorers.
    * 
    * @param target
    *          The target document number.
@@ -218,20 +146,24 @@ class DisjunctionSumScorer extends Scorer {
    */
   @Override
   public int advance(int target) throws IOException {
-    if (scorerDocQueue.size() < minimumNrMatchers) {
-      return currentDoc = NO_MORE_DOCS;
-    }
-    if (target <= currentDoc) {
-      return currentDoc;
-    }
-    do {
-      if (scorerDocQueue.topDoc() >= target) {
-        return advanceAfterCurrent() ? currentDoc : (currentDoc = NO_MORE_DOCS);
-      } else if (!scorerDocQueue.topSkipToAndAdjustElsePop(target)) {
-        if (scorerDocQueue.size() < minimumNrMatchers) {
-          return currentDoc = NO_MORE_DOCS;
+    if (numScorers == 0) return doc = NO_MORE_DOCS;
+    while (subScorers[0].docID() < target) {
+      if (subScorers[0].advance(target) != NO_MORE_DOCS) {
+        heapAdjust(0);
+      } else {
+        heapRemoveRoot();
+        if (numScorers == 0) {
+          return doc = NO_MORE_DOCS;
         }
       }
-    } while (true);
+    }
+    
+    afterNext();
+
+    if (nrMatchers >= minimumNrMatchers) {
+      return doc;
+    } else {
+      return nextDoc();
+    }
   }
 }
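
As a quick sanity check on the rewritten nextDoc()/advance(), here is a
brute-force oracle (invented doc IDs, plain arrays in place of Scorers) for
what a min-should-match disjunction must produce; the per-doc matcher count is
also exactly what the new freq() reports:

    import java.util.Map;
    import java.util.TreeMap;

    public class MinShouldMatchDemo {
      public static void main(String[] args) {
        int[][] postings = { {2, 5}, {2, 9}, {5, 9} };  // three "scorers"
        int minimumNrMatchers = 2;
        TreeMap<Integer, Integer> matchers = new TreeMap<Integer, Integer>();
        for (int[] scorer : postings) {
          for (int doc : scorer) {
            Integer n = matchers.get(doc);
            matchers.put(doc, n == null ? 1 : n + 1);
          }
        }
        for (Map.Entry<Integer, Integer> e : matchers.entrySet()) {
          if (e.getValue() >= minimumNrMatchers) {
            System.out.println("doc=" + e.getKey() + " nrMatchers=" + e.getValue());
          }
        }
        // prints doc=2, doc=5 and doc=9, each with nrMatchers=2
      }
    }
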
diff --git a/lucene/core/src/java/org/apache/lucene/search/ReqExclScorer.java b/lucene/core/src/java/org/apache/lucene/search/ReqExclScorer.java
index a9d2d04..2f94397 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ReqExclScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ReqExclScorer.java
@@ -102,7 +102,12 @@ class ReqExclScorer extends Scorer {
   public float score() throws IOException {
     return reqScorer.score(); // reqScorer may be null when next() or skipTo() already return false
   }
-  
+
+  @Override
+  public float freq() throws IOException {
+    return reqScorer.freq();
+  }
+
   @Override
   public int advance(int target) throws IOException {
     if (reqScorer == null) {
diff --git a/lucene/core/src/java/org/apache/lucene/search/ReqOptSumScorer.java b/lucene/core/src/java/org/apache/lucene/search/ReqOptSumScorer.java
index ad9a9c1..bb677a2 100644
--- a/lucene/core/src/java/org/apache/lucene/search/ReqOptSumScorer.java
+++ b/lucene/core/src/java/org/apache/lucene/search/ReqOptSumScorer.java
@@ -80,5 +80,11 @@ class ReqOptSumScorer extends Scorer {
     return optScorerDoc == curDoc ? reqScore + optScorer.score() : reqScore;
   }
 
+  @Override
+  public float freq() throws IOException {
+    // we might have deferred advance()
+    score();
+    return (optScorer != null && optScorer.docID() == reqScorer.docID()) ? 2 : 1;
+  }
 }
 
diff --git a/lucene/core/src/java/org/apache/lucene/util/Constants.java b/lucene/core/src/java/org/apache/lucene/util/Constants.java
index c142779..c283b73 100644
--- a/lucene/core/src/java/org/apache/lucene/util/Constants.java
+++ b/lucene/core/src/java/org/apache/lucene/util/Constants.java
@@ -122,7 +122,7 @@ public final class Constants {
   // NOTE: we track per-segment version as a String with the "X.Y" format, e.g.
   // "4.0", "3.1", "3.0". Therefore when we change this constant, we should keep
   // the format.
-  public static final String LUCENE_MAIN_VERSION = ident("3.6.1");
+  public static final String LUCENE_MAIN_VERSION = ident("3.6.2");
 
   public static final String LUCENE_VERSION;
   static {
diff --git a/lucene/core/src/java/org/apache/lucene/util/PagedBytes.java b/lucene/core/src/java/org/apache/lucene/util/PagedBytes.java
index 3c4357f..def8b9d 100644
--- a/lucene/core/src/java/org/apache/lucene/util/PagedBytes.java
+++ b/lucene/core/src/java/org/apache/lucene/util/PagedBytes.java
@@ -411,7 +411,7 @@ public final class PagedBytes {
 
     /** Returns the current byte position. */
     public long getPosition() {
-      return currentBlockIndex * blockSize + currentBlockUpto;
+      return ((long) currentBlockIndex * blockSize) + currentBlockUpto;
     }
   
     /** Seek to a position previously obtained from
@@ -516,7 +516,7 @@ public final class PagedBytes {
       if (currentBlock == null) {
         return 0;
       } else {
-        return blocks.size() * blockSize + upto;
+        return ((long) blocks.size() * blockSize) + upto;
       }
     }
   }
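
Both getPosition() fixes above address the classic 32-bit multiply bug: once
the paged buffer grows past 2 GB, the int product wraps before it is widened to
long. A standalone sketch, using a hypothetical block index and the 1 << 15
block size that the new Test2BPagedBytes below passes to PagedBytes(15):

    public class OverflowDemo {
      public static void main(String[] args) {
        int currentBlockIndex = 70000;                     // hypothetical
        int blockSize = 32768;                             // 1 << 15
        long bad  = currentBlockIndex * blockSize;         // int math wraps: -2001207296
        long good = (long) currentBlockIndex * blockSize;  // widen first: 2293760000
        System.out.println(bad + " vs " + good);
      }
    }
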
diff --git a/lucene/core/src/java/org/apache/lucene/util/ScorerDocQueue.java b/lucene/core/src/java/org/apache/lucene/util/ScorerDocQueue.java
index 952672b..0ff7c36 100755
--- a/lucene/core/src/java/org/apache/lucene/util/ScorerDocQueue.java
+++ b/lucene/core/src/java/org/apache/lucene/util/ScorerDocQueue.java
@@ -29,7 +29,9 @@ import org.apache.lucene.search.Scorer;
   require log(size) time. The ordering is by Scorer.doc().
  *
  * @lucene.internal
+ * @deprecated 
  */
+@Deprecated
 public class ScorerDocQueue {  // later: SpansQueue for spans with doc and term positions
   private final HeapedScorerDoc[] heap;
   private final int maxSize;
diff --git a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
index 9b079a8..b0048ea 100644
--- a/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
+++ b/lucene/core/src/test/org/apache/lucene/index/TestIndexWriter.java
@@ -1909,4 +1909,53 @@ public class TestIndexWriter extends LuceneTestCase {
     r.close();
     dir.close();
   }
+
+  // LUCENE-4398
+  public void testRotatingFieldNames() throws Exception {
+    Directory dir = newFSDirectory(_TestUtil.getTempDir("TestIndexWriter.testChangingFields"));
+    IndexWriterConfig iwc = new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random));
+    iwc.setRAMBufferSizeMB(0.2);
+    iwc.setMaxBufferedDocs(-1);
+    IndexWriter w = new IndexWriter(dir, iwc);
+    int upto = 0;
+
+    int secondFlushDocCount = -1;
+    for(int iter=0;iter<10;iter++) {
+      final int startFlushCount = w.getFlushCount();
+      int docCount = 0;
+      while(w.getFlushCount() == startFlushCount) {
+        Document doc = new Document();
+        for(int i=0;i<10;i++) {
+          Field f = new Field("field" + (upto++), "content", Field.Store.NO, Field.Index.ANALYZED);
+          f.setOmitNorms(true);
+          doc.add(f);
+        }
+        w.addDocument(doc);
+        docCount++;
+      }
+
+      if (VERBOSE) {
+        System.out.println("TEST: iter=" + iter + " flushed after docCount=" + docCount);
+      }
+
+      if (iter == 1) {
+        // Use 2nd not 1st flush because 1st flush is
+        // "unusually" high because all allocations are
+        // fresh:
+        secondFlushDocCount = docCount;
+      } else if (iter > 1) {
+        assertTrue("flushed after too few docs: 2nd segment flushed at docCount=" + secondFlushDocCount + ", but current segment flushed after docCount=" + docCount + "; iter=" + iter, ((float) docCount) / secondFlushDocCount > 0.9);
+      }
+
+      if (upto > 5000) {
+        // Start re-using field names after a while
+        // ... important because otherwise we can OOME due
+        // to too many FieldInfo instances.
+        upto = 0;
+      }
+    }
+
+    w.close();
+    dir.close();
+  }
 }
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBoolean2.java b/lucene/core/src/test/org/apache/lucene/search/TestBoolean2.java
index f33804c..25d3e01 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBoolean2.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBoolean2.java
@@ -226,6 +226,14 @@ public class TestBoolean2 extends LuceneTestCase {
       searcher.setSimilarity(oldSimilarity);
     }
   }
+  
+  // die serialization, die
+  static class FunkySimilarity extends DefaultSimilarity {
+    @Override
+    public float coord(int overlap, int maxOverlap) {
+      return overlap / ((float)maxOverlap + 1);
+    }
+  }
 
   @Test
   public void testRandomQueries() throws Exception {
@@ -247,6 +255,13 @@ public class TestBoolean2 extends LuceneTestCase {
         Sort sort = Sort.INDEXORDER;
 
         QueryUtils.check(random, q1,searcher);
+        final Similarity oldSim = searcher.getSimilarity();
+        try {
+          searcher.setSimilarity(new FunkySimilarity());
+          QueryUtils.check(random, q1,searcher);
+        } finally {
+          searcher.setSimilarity(oldSim);
+        }
 
         TopFieldCollector collector = TopFieldCollector.create(sort, 1000,
             false, true, true, true);
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java
index a00c61c..aa47c69 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanMinShouldMatch.java
@@ -23,6 +23,8 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.search.DefaultSimilarity;
+import org.apache.lucene.search.Similarity;
 import org.apache.lucene.store.Directory;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -295,8 +297,8 @@ public class TestBooleanMinShouldMatch extends LuceneTestCase {
     }
 
     public void testRandomQueries() throws Exception {
-      String field="data";
-      String[] vals = {"1","2","3","4","5","6","A","Z","B","Y","Z","X","foo"};
+      final String field="data";
+      final String[] vals = {"1","2","3","4","5","6","A","Z","B","Y","Z","X","foo"};
       int maxLev=4;
 
       // callback object to set a random setMinimumNumberShouldMatch
@@ -308,13 +310,18 @@ public class TestBooleanMinShouldMatch extends LuceneTestCase {
             if (c[i].getOccur() == BooleanClause.Occur.SHOULD) opt++;
           }
           q.setMinimumNumberShouldMatch(random.nextInt(opt+2));
+          if (random.nextBoolean()) {
+            // also add a random negation
+            Term randomTerm = new Term(field, vals[random.nextInt(vals.length)]);
+            q.add(new TermQuery(randomTerm), BooleanClause.Occur.MUST_NOT);
+          }
         }
       };
 
 
 
       // increase number of iterations for more complete testing      
-      int num = atLeast(10);
+      int num = atLeast(20);
       for (int i = 0; i < num; i++) {
         int lev = random.nextInt(maxLev);
         final long seed = random.nextLong();
@@ -334,44 +341,90 @@ public class TestBooleanMinShouldMatch extends LuceneTestCase {
           QueryUtils.check(random, q1,s);
           QueryUtils.check(random, q2,s);
         }
-        // The constrained query
-        // should be a superset to the unconstrained query.
-        if (top2.totalHits > top1.totalHits) {
-          fail("Constrained results not a subset:\n"
-                        + CheckHits.topdocsString(top1,0,0)
-                        + CheckHits.topdocsString(top2,0,0)
-                        + "for query:" + q2.toString());
-        }
-
-        for (int hit=0; hit<top2.totalHits; hit++) {
-          int id = top2.scoreDocs[hit].doc;
-          float score = top2.scoreDocs[hit].score;
-          boolean found=false;
-          // find this doc in other hits
-          for (int other=0; other<top1.totalHits; other++) {
-            if (top1.scoreDocs[other].doc == id) {
-              found=true;
-              float otherScore = top1.scoreDocs[other].score;
-              // check if scores match
-              assertEquals("Doc " + id + " scores don't match\n"
-                  + CheckHits.topdocsString(top1,0,0)
-                  + CheckHits.topdocsString(top2,0,0)
-                  + "for query:" + q2.toString(),
-                  score, otherScore, 1.0e-6f);
-            }
-          }
+        assertSubsetOfSameScores(q2, top1, top2);
+      }
+      // System.out.println("Total hits:"+tot);
+    }
+    
+    private void assertSubsetOfSameScores(Query q, TopDocs top1, TopDocs top2) {
+      // The constrained query
+      // should be a subset of the unconstrained query.
+      if (top2.totalHits > top1.totalHits) {
+        fail("Constrained results not a subset:\n"
+                      + CheckHits.topdocsString(top1,0,0)
+                      + CheckHits.topdocsString(top2,0,0)
+                      + "for query:" + q.toString());
+      }
 
-          // check if subset
-          if (!found) fail("Doc " + id + " not found\n"
+      for (int hit=0; hit<top2.totalHits; hit++) {
+        int id = top2.scoreDocs[hit].doc;
+        float score = top2.scoreDocs[hit].score;
+        boolean found=false;
+        // find this doc in other hits
+        for (int other=0; other<top1.totalHits; other++) {
+          if (top1.scoreDocs[other].doc == id) {
+            found=true;
+            float otherScore = top1.scoreDocs[other].score;
+            // check if scores match
+            assertEquals("Doc " + id + " scores don't match\n"
                 + CheckHits.topdocsString(top1,0,0)
                 + CheckHits.topdocsString(top2,0,0)
-                + "for query:" + q2.toString());
+                + "for query:" + q.toString(),
+                score, otherScore, CheckHits.EXPLAIN_SCORE_TOLERANCE_DELTA);
+          }
         }
+
+        // check if subset
+        if (!found) fail("Doc " + id + " not found\n"
+              + CheckHits.topdocsString(top1,0,0)
+              + CheckHits.topdocsString(top2,0,0)
+              + "for query:" + q.toString());
       }
-      // System.out.println("Total hits:"+tot);
     }
 
-
+    public void testRewriteCoord1() throws Exception {
+      final Similarity oldSimilarity = s.getSimilarity();
+      try {
+        s.setSimilarity(new DefaultSimilarity() {
+          @Override
+          public float coord(int overlap, int maxOverlap) {
+            return overlap / ((float)maxOverlap + 1);
+          }
+        });
+        BooleanQuery q1 = new BooleanQuery();
+        q1.add(new TermQuery(new Term("data", "1")), BooleanClause.Occur.SHOULD);
+        BooleanQuery q2 = new BooleanQuery();
+        q2.add(new TermQuery(new Term("data", "1")), BooleanClause.Occur.SHOULD);
+        q2.setMinimumNumberShouldMatch(1);
+        TopDocs top1 = s.search(q1,null,100);
+        TopDocs top2 = s.search(q2,null,100);
+        assertSubsetOfSameScores(q2, top1, top2);
+      } finally {
+        s.setSimilarity(oldSimilarity);
+      }
+    }
+    
+    public void testRewriteNegate() throws Exception {
+      final Similarity oldSimilarity = s.getSimilarity();
+      try {
+        s.setSimilarity(new DefaultSimilarity() {
+          @Override
+          public float coord(int overlap, int maxOverlap) {
+            return overlap / ((float)maxOverlap + 1);
+          }
+        });
+        BooleanQuery q1 = new BooleanQuery();
+        q1.add(new TermQuery(new Term("data", "1")), BooleanClause.Occur.SHOULD);
+        BooleanQuery q2 = new BooleanQuery();
+        q2.add(new TermQuery(new Term("data", "1")), BooleanClause.Occur.SHOULD);
+        q2.add(new TermQuery(new Term("data", "Z")), BooleanClause.Occur.MUST_NOT);
+        TopDocs top1 = s.search(q1,null,100);
+        TopDocs top2 = s.search(q2,null,100);
+        assertSubsetOfSameScores(q2, top1, top2);
+      } finally {
+        s.setSimilarity(oldSimilarity);
+      }
+    }
 
     protected void printHits(String test, ScoreDoc[] h, Searcher searcher) throws Exception {
 
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java
new file mode 100644
index 0000000..59fe952
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanQueryVisitSubscorers.java
@@ -0,0 +1,204 @@
+package org.apache.lucene.search;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.lucene.analysis.Analyzer;
+import org.apache.lucene.analysis.MockAnalyzer;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.Field;
+import org.apache.lucene.document.Field.Index;
+import org.apache.lucene.document.Field.Store;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.RandomIndexWriter;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.util.LuceneTestCase;
+
+// TODO: refactor to a base class that collects freqs from the scorer tree
+// and test all queries with it
+public class TestBooleanQueryVisitSubscorers extends LuceneTestCase {
+  Analyzer analyzer;
+  IndexReader reader;
+  IndexSearcher searcher;
+  Directory dir;
+  
+  static final String F1 = "title";
+  static final String F2 = "body";
+  
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    analyzer = new MockAnalyzer(random);
+    dir = newDirectory();
+    IndexWriterConfig config = newIndexWriterConfig(TEST_VERSION_CURRENT, analyzer);
+    config.setMergePolicy(newLogMergePolicy()); // we will use docids to validate
+    RandomIndexWriter writer = new RandomIndexWriter(random, dir, config);
+    writer.addDocument(doc("lucene", "lucene is a very popular search engine library"));
+    writer.addDocument(doc("solr", "solr is a very popular search server and is using lucene"));
+    writer.addDocument(doc("nutch", "nutch is an internet search engine with web crawler and is using lucene and hadoop"));
+    reader = writer.getReader();
+    writer.close();
+    searcher = new IndexSearcher(reader);
+  }
+  
+  @Override
+  public void tearDown() throws Exception {
+    reader.close();
+    dir.close();
+    super.tearDown();
+  }
+
+  public void testDisjunctions() throws IOException {
+    BooleanQuery bq = new BooleanQuery();
+    bq.add(new TermQuery(new Term(F1, "lucene")), BooleanClause.Occur.SHOULD);
+    bq.add(new TermQuery(new Term(F2, "lucene")), BooleanClause.Occur.SHOULD);
+    bq.add(new TermQuery(new Term(F2, "search")), BooleanClause.Occur.SHOULD);
+    Map<Integer,Integer> tfs = getDocCounts(searcher, bq);
+    assertEquals(3, tfs.size()); // 3 documents
+    assertEquals(3, tfs.get(0).intValue()); // f1:lucene + f2:lucene + f2:search
+    assertEquals(2, tfs.get(1).intValue()); // f2:search + f2:lucene
+    assertEquals(2, tfs.get(2).intValue()); // f2:search + f2:lucene
+  }
+  
+  public void testNestedDisjunctions() throws IOException {
+    BooleanQuery bq = new BooleanQuery();
+    bq.add(new TermQuery(new Term(F1, "lucene")), BooleanClause.Occur.SHOULD);
+    BooleanQuery bq2 = new BooleanQuery();
+    bq2.add(new TermQuery(new Term(F2, "lucene")), BooleanClause.Occur.SHOULD);
+    bq2.add(new TermQuery(new Term(F2, "search")), BooleanClause.Occur.SHOULD);
+    bq.add(bq2, BooleanClause.Occur.SHOULD);
+    Map<Integer,Integer> tfs = getDocCounts(searcher, bq);
+    assertEquals(3, tfs.size()); // 3 documents
+    assertEquals(3, tfs.get(0).intValue()); // f1:lucene + f2:lucene + f2:search
+    assertEquals(2, tfs.get(1).intValue()); // f2:search + f2:lucene
+    assertEquals(2, tfs.get(2).intValue()); // f2:search + f2:lucene
+  }
+  
+  public void testConjunctions() throws IOException {
+    BooleanQuery bq = new BooleanQuery();
+    bq.add(new TermQuery(new Term(F2, "lucene")), BooleanClause.Occur.MUST);
+    bq.add(new TermQuery(new Term(F2, "is")), BooleanClause.Occur.MUST);
+    Map<Integer,Integer> tfs = getDocCounts(searcher, bq);
+    assertEquals(3, tfs.size()); // 3 documents
+    assertEquals(2, tfs.get(0).intValue()); // f2:lucene + f2:is
+    assertEquals(3, tfs.get(1).intValue()); // f2:is + f2:is + f2:lucene
+    assertEquals(3, tfs.get(2).intValue()); // f2:is + f2:is + f2:lucene
+  }
+  
+  static Document doc(String v1, String v2) {
+    Document doc = new Document();
+    doc.add(new Field(F1, v1, Store.YES, Index.NOT_ANALYZED_NO_NORMS));
+    doc.add(new Field(F2, v2, Store.YES, Index.ANALYZED));
+    return doc;
+  }
+  
+  static Map<Integer,Integer> getDocCounts(IndexSearcher searcher, Query query) throws IOException {
+    MyCollector collector = new MyCollector();
+    searcher.search(query, collector);
+    return collector.docCounts;
+  }
+  
+  static class MyCollector extends Collector {
+    
+    private TopDocsCollector<ScoreDoc> collector;
+    private int docBase;
+
+    public final Map<Integer,Integer> docCounts = new HashMap<Integer,Integer>();
+    private final Set<TermQueryScorer> tqsSet = new HashSet<TermQueryScorer>();
+    private final Scorer.ScorerVisitor<Query, Query, Scorer> visitor = new MockScorerVisitor();
+    
+    MyCollector() {
+      collector = TopScoreDocCollector.create(10, true);
+    }
+
+    @Override
+    public boolean acceptsDocsOutOfOrder() {
+      return false;
+    }
+
+    @Override
+    public void collect(int doc) throws IOException {
+      int freq = 0;
+      for(TermQueryScorer scorer : tqsSet) {
+        if (doc == scorer.scorer.docID()) {
+          freq += scorer.scorer.freq();
+        }
+      }
+      docCounts.put(doc + docBase, freq);
+      collector.collect(doc);
+    }
+
+    @Override
+    public void setNextReader(IndexReader reader, int docBase) throws IOException {
+      this.docBase = docBase;
+      collector.setNextReader(reader, docBase);
+    }
+
+    @Override
+    public void setScorer(Scorer scorer) throws IOException {
+      collector.setScorer(scorer);
+      tqsSet.clear();
+      scorer.visitScorers(visitor);
+    }
+    
+    public TopDocs topDocs(){
+      return collector.topDocs();
+    }
+    
+    public int freq(int doc) throws IOException {
+      return docCounts.get(doc);
+    }
+    
+    private class MockScorerVisitor extends Scorer.ScorerVisitor<Query, Query, Scorer> {
+      
+      @Override
+      public void visitOptional(Query parent, Query child, Scorer scorer) {
+        if (child instanceof TermQuery)
+          tqsSet.add(new TermQueryScorer((TermQuery) child, scorer));
+      }
+
+      @Override
+      public void visitProhibited(Query parent, Query child, Scorer scorer) {
+        if (child instanceof TermQuery)
+          tqsSet.add(new TermQueryScorer((TermQuery) child, scorer));
+      }
+
+      @Override
+      public void visitRequired(Query parent, Query child, Scorer scorer) {
+        if (child instanceof TermQuery)
+          tqsSet.add(new TermQueryScorer((TermQuery) child, scorer));
+      }
+    }
+
+    private static class TermQueryScorer {
+      private TermQuery query;
+      private Scorer scorer;
+      public TermQueryScorer(TermQuery query, Scorer scorer) {
+        this.query = query;
+        this.scorer = scorer;
+      }
+    }
+  }
+}
diff --git a/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java b/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java
index 6f965af..766f16f 100644
--- a/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java
+++ b/lucene/core/src/test/org/apache/lucene/search/TestBooleanScorer.java
@@ -27,6 +27,7 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.RandomIndexWriter;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.search.BooleanQuery.BooleanWeight;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.LuceneTestCase;
 
@@ -85,7 +86,14 @@ public class TestBooleanScorer extends LuceneTestCase
       }
       
     }};
-    BooleanScorer bs = new BooleanScorer(null, false, sim, 1, Arrays.asList(scorers), null, scorers.length);
+    Directory directory = newDirectory();
+    RandomIndexWriter writer = new RandomIndexWriter(random, directory);
+    writer.commit();
+    IndexReader ir = writer.getReader();
+    writer.close();
+    IndexSearcher searcher = newSearcher(ir);
+    BooleanWeight weight = (BooleanWeight) new BooleanQuery().createWeight(searcher);
+    BooleanScorer bs = new BooleanScorer(weight, false, sim, 1, Arrays.asList(scorers), null, scorers.length);
 
     final List<Integer> hits = new ArrayList<Integer>();
     bs.score(new Collector() {
@@ -112,6 +120,8 @@ public class TestBooleanScorer extends LuceneTestCase
 
     assertEquals("should have only 1 hit", 1, hits.size());
     assertEquals("hit should have been docID=3000", 3000, hits.get(0).intValue());
+    ir.close();
+    directory.close();
   }
 
   public void testMoreThan32ProhibitedClauses() throws Exception {
diff --git a/lucene/core/src/test/org/apache/lucene/util/Test2BPagedBytes.java b/lucene/core/src/test/org/apache/lucene/util/Test2BPagedBytes.java
new file mode 100644
index 0000000..4f71a16
--- /dev/null
+++ b/lucene/core/src/test/org/apache/lucene/util/Test2BPagedBytes.java
@@ -0,0 +1,68 @@
+package org.apache.lucene.util;
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Arrays;
+import java.util.Random;
+
+import org.apache.lucene.util.PagedBytes.PagedBytesDataInput;
+import org.apache.lucene.util.PagedBytes.PagedBytesDataOutput;
+import org.junit.Ignore;
+
+@Ignore("You must increase heap to > 2 G to run this")
+public class Test2BPagedBytes extends LuceneTestCase {
+
+  public void test() throws Exception {
+    PagedBytes pb = new PagedBytes(15);
+    PagedBytesDataOutput dataOutput = pb.getDataOutput();
+    long netBytes = 0;
+    long seed = random.nextLong();
+    long lastFP = 0;
+    Random r2 = new Random(seed);
+    while(netBytes < 1.1*Integer.MAX_VALUE) {
+      int numBytes = _TestUtil.nextInt(r2, 1, 100000);
+      byte[] bytes = new byte[numBytes];
+      r2.nextBytes(bytes);
+      dataOutput.writeBytes(bytes, bytes.length);
+      long fp = dataOutput.getPosition();
+      assert fp == lastFP + numBytes;
+      lastFP = fp;
+      netBytes += numBytes;
+    }
+    pb.freeze(true);
+
+    PagedBytesDataInput dataInput = pb.getDataInput();
+    lastFP = 0;
+    r2 = new Random(seed);
+    netBytes = 0;
+    while(netBytes < 1.1*Integer.MAX_VALUE) {
+      int numBytes = _TestUtil.nextInt(r2, 1, 100000);
+      byte[] bytes = new byte[numBytes];
+      r2.nextBytes(bytes);
+
+      byte[] bytesIn = new byte[numBytes];
+      dataInput.readBytes(bytesIn, 0, numBytes);
+      assertTrue(Arrays.equals(bytes, bytesIn));
+
+      long fp = dataInput.getPosition();
+      assert fp == lastFP + numBytes;
+      lastFP = fp;
+      netBytes += numBytes;
+    }
+  }
+}
diff --git a/lucene/site/src/documentation/content/xdocs/tabs.xml b/lucene/site/src/documentation/content/xdocs/tabs.xml
index 8627275..14ce790 100755
--- a/lucene/site/src/documentation/content/xdocs/tabs.xml
+++ b/lucene/site/src/documentation/content/xdocs/tabs.xml
@@ -35,5 +35,5 @@
 
   <tab id="" label="Main" href="http://lucene.apache.org/java/docs/"/>
   <tab id="wiki" label="Wiki" href="http://wiki.apache.org/lucene-java"/>
-  <tab label="Lucene 3.6.1 Documentation" dir=""/>
+  <tab label="Lucene 3.6.2 Documentation" dir=""/>
 </tabs>
diff --git a/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java b/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java
index 493d5bc..614ebec 100644
--- a/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java
+++ b/lucene/test-framework/src/java/org/apache/lucene/index/RandomIndexWriter.java
@@ -90,7 +90,7 @@ public class RandomIndexWriter implements Closeable {
     w = new MockIndexWriter(r, dir, c);
     flushAt = _TestUtil.nextInt(r, 10, 1000);
     if (LuceneTestCase.VERBOSE) {
-      System.out.println("RIW config=" + w.getConfig());
+      System.out.println("RIW dir=" + dir + " config=" + w.getConfig());
     }
 
     // Make sure we sometimes test indices that don't get
diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index 9f54808..eca7bc9 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -20,6 +20,16 @@ when more advanced customization is required.
 See README.txt and http://lucene.apache.org/solr for more information
 on how to get started.
 
+==================  3.6.2  ==================
+
+Bug Fixes
+----------------------
+* SOLR-3790: ConcurrentModificationException could be thrown when using hl.fl=*.
+  (yonik, koji)
+
+* SOLR-3589: Edismax parser does not honor mm parameter if analyzer splits a token.
+  (Tom Burton-West, Robert Muir)
+
 ==================  3.6.1  ==================
 More information about this release, including any errata related to the 
 release notes, upgrade instructions, or other changes may be found online at:
@@ -34,7 +44,7 @@ Bug Fixes:
 * SOLR-3361: ReplicationHandler "maxNumberOfBackups" doesn't work if backups are triggered on commit
   (James Dyer, Tomas Fernandez Lobbe)
 
-* SOLR-3375: Fix charset problems with HttpSolrServer (Roger Håkansson, yonik, siren)
+* SOLR-3375: Fix charset problems with HttpSolrServer (Roger Håkansson, yonik, siren)
 
 * SOLR-3436: Group count incorrect when not all shards are queried in the second
   pass. (Francois Perron, Martijn van Groningen)
@@ -44,7 +54,7 @@ Bug Fixes:
 
 * SOLR-3489: Config file replication less error prone (Jochen Just via janhoy)
 
-* SOLR-3477: SOLR does not start up when no cores are defined (Tomás Fernández Löbbe via tommaso)
+* SOLR-3477: SOLR does not start up when no cores are defined (Tomás Fernández Löbbe via tommaso)
 
 ==================  3.6.0  ==================
 More information about this release, including any errata related to the 
@@ -169,7 +179,7 @@ New Features
   (Greg Fodor & Andrew Morrison via janhoy, rmuir, Uwe Schindler)
 
 * SOLR-3026: eDismax: Locking down which fields can be explicitly queried (user fields aka uf)
-  (janhoy, hossmann, Tomás Fernández Löbbe)
+  (janhoy, hossmann, Tomás Fernández Löbbe)
 
 * SOLR-2826: URLClassify Update Processor (janhoy)
 
@@ -654,7 +664,7 @@ New Features
   params.  (Yury Kats, hossman)
 
 * SOLR-2714: JSON update format - "null" field values are now dropped
-  instead of causing an exception. (Trygve Laugstøl, yonik)
+  instead of causing an exception. (Trygve Laugstøl, yonik)
 
 
 Optimizations
@@ -680,7 +690,7 @@ Bug Fixes
 
 * SOLR-2230: CommonsHttpSolrServer.addFile could not be used to send 
   multiple files in a single request.
-  (Stephan Günther, hossman)
+  (Stephan Günther, hossman)
 
 * SOLR-2541: PluginInfos was not correctly parsing <long/> tags when 
   initializing plugins
@@ -723,7 +733,7 @@ Bug Fixes
   (Mitsu Hadeishi, hossman)
 
 * SOLR-2734: Fix debug info for MoreLikeThisHandler (introduced when SOLR-860 was backported to 3x).
-  (Andrés Cobas, hossman via koji)
+  (Andrés Cobas, hossman via koji)
 
  Other Changes
 ----------------------
@@ -792,7 +802,7 @@ New Features
 * SOLR-2524: (SOLR-236, SOLR-237, SOLR-1773, SOLR-1311) Grouping / Field collapsing
   using the Lucene grouping contrib. The search result can be grouped by field and query. 
   (Martijn van Groningen, Emmanuel Keller, Shalin Shekhar Mangar, Koji Sekiguchi, 
-   Iván de Prado, Ryan McKinley, Marc Sturlese, Peter Karich, Bojan Smid, 
+   Iván de Prado, Ryan McKinley, Marc Sturlese, Peter Karich, Bojan Smid, 
    Charles Hornberger, Dieter Grad, Dmitry Lihachev, Doug Steigerwald,
    Karsten Sperling, Michael Gundlach, Oleg Gnatovskiy, Thomas Traeger,
    Harish Agarwal, yonik, Michael McCandless, Bill Bell)
@@ -813,7 +823,7 @@ Bug Fixes
 
 * SOLR-2519: Improve text_* fieldTypes in example schema.xml: improve
   cross-language defaults for text_general; break out separate
-  English-specific fieldTypes (Jan Høydahl, hossman, Robert Muir,
+  English-specific fieldTypes (Jan Høydahl, hossman, Robert Muir,
   yonik, Mike McCandless)
 
 * SOLR-2462: Fix extremely high memory usage problems with spellcheck.collate.
@@ -954,7 +964,7 @@ Other Changes
   (Drew Farris, Robert Muir, Steve Rowe)
 
 * SOLR-2105: Rename RequestHandler param 'update.processor' to 'update.chain'.
-	(Jan Høydahl via Mark Miller)
+	(Jan Høydahl via Mark Miller)
 	
 * SOLR-2485: Deprecate BaseResponseWriter, GenericBinaryResponseWriter, and 
   GenericTextResponseWriter.  These classes will be removed in 4.0.  (ryan)
@@ -1247,7 +1257,7 @@ Optimizations
 
 Bug Fixes
 ----------------------
-* SOLR-1769: Solr 1.4 Replication - Repeater throwing NullPointerException (Jörgen Rydenius via noble)
+* SOLR-1769: Solr 1.4 Replication - Repeater throwing NullPointerException (Jörgen Rydenius via noble)
 
 * SOLR-1432: Make the new ValueSource.getValues(context,reader) delegate
   to the original ValueSource.getValues(reader) so custom sources
@@ -2501,7 +2511,7 @@ Other Changes
 
 44. Upgraded to Lucene 2.9-dev r801856 (Mark Miller)
 
-45. SOLR-1276: Added StatsComponentTest (Rafał Kuć, gsingers)
+45. SOLR-1276: Added StatsComponentTest (Rafał Kuć, gsingers)
 
 46. SOLR-1377:  The TokenizerFactory API has changed to explicitly return a Tokenizer 
     rather then a TokenStream (that may be or may not be a Tokenizer).  This change 
@@ -2541,7 +2551,7 @@ Build
 
 Documentation
 ----------------------
- 1. SOLR-789: The javadoc of RandomSortField is not readable (Nicolas Lalevée via koji)
+ 1. SOLR-789: The javadoc of RandomSortField is not readable (Nicolas Lalevée via koji)
 
  2. SOLR-962: Note about null handling in ModifiableSolrParams.add javadoc
     (Kay Kay via hossman)
@@ -2982,7 +2992,7 @@ Bug Fixes
  9. SOLR-294: Logging of elapsed time broken on Solaris because the date command
     there does not support the %s output format.  (bill)
 
-10. SOLR-136: Snappuller - "date -d" and locales don't mix.  (Jürgen Hermann via    bill)
+10. SOLR-136: Snappuller - "date -d" and locales don't mix.  (Jürgen Hermann via    bill)
 
 11. SOLR-333: Changed distributiondump.jsp to use Solr HOME instead of CWD to set path.
  
diff --git a/solr/common-build.xml b/solr/common-build.xml
index 87451d4..5cddc0a 100644
--- a/solr/common-build.xml
+++ b/solr/common-build.xml
@@ -24,7 +24,7 @@
   <dirname file="${ant.file.common-solr}" property="common-solr.dir"/>
   
   <property name="Name" value="Solr" />
-  <property name="version" value="3.6.1-SNAPSHOT"/>
+  <property name="version" value="3.6.2-SNAPSHOT"/>
   <condition property="version.contains.SNAPSHOT">
     <contains casesensitive="true" string="${version}" substring="-SNAPSHOT"/>
   </condition>
@@ -71,7 +71,7 @@
        By default, this should be set to "X.Y.M.${dateversion}"
        where X.Y.M is the last version released (on this branch).
     -->
-  <property name="solr.spec.version" value="3.6.1.${dateversion}" />
+  <property name="solr.spec.version" value="3.6.2.${dateversion}" />
 
   <path id="solr.base.classpath">
   	<pathelement path="${analyzers-common.jar}"/>
diff --git a/solr/contrib/analysis-extras/CHANGES.txt b/solr/contrib/analysis-extras/CHANGES.txt
index a846b10..451eb59 100644
--- a/solr/contrib/analysis-extras/CHANGES.txt
+++ b/solr/contrib/analysis-extras/CHANGES.txt
@@ -10,7 +10,9 @@ It includes integration with ICU for multilingual support, and
 analyzers for Chinese and Polish.
 
 
-$Id$
+==================  3.6.2  ==================
+
+(No Changes)
 
 ==================  3.6.1 ==================
 
diff --git a/solr/contrib/clustering/CHANGES.txt b/solr/contrib/clustering/CHANGES.txt
index 504dfac..40f762f 100644
--- a/solr/contrib/clustering/CHANGES.txt
+++ b/solr/contrib/clustering/CHANGES.txt
@@ -6,7 +6,9 @@ See http://wiki.apache.org/solr/ClusteringComponent
 
 CHANGES
 
-$Id$
+==================  3.6.2  ==================
+
+(No Changes)
 
 ================== Release 3.6.1 ==================
 
diff --git a/solr/contrib/dataimporthandler/CHANGES.txt b/solr/contrib/dataimporthandler/CHANGES.txt
index 24c5636..1ca5955 100644
--- a/solr/contrib/dataimporthandler/CHANGES.txt
+++ b/solr/contrib/dataimporthandler/CHANGES.txt
@@ -7,7 +7,19 @@ DataImportHandler is a data import tool for Solr which makes importing data from
 HTTP data sources quick and easy.
 
 
-$Id$
+==================  3.6.2  ==================
+
+Bug Fixes
+----------------------
+* SOLR-3779: DataImportHandler's LineEntityProcessor when used in conjunction 
+  with FileListEntityProcessor would only process the first file.
+  (Ahmet Arslan via James Dyer)
+  
+* SOLR-3791: CachedSqlEntityProcessor would throw a NullPointerException when 
+  a query returns a row with a NULL key.  (Steffen Moelter via James Dyer)
+  
+* SOLR-3850: DataImportHandler "cacheKey" parameter was incorrectly renamed "cachePk"
+  (James Dyer)
 
 ==================  3.6.1 ==================
 
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DIHCacheSupport.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DIHCacheSupport.java
index b3200d6..42cb7ce 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DIHCacheSupport.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/DIHCacheSupport.java
@@ -237,15 +237,13 @@ public class DIHCacheSupport {
    */
   public static final String CACHE_FOREIGN_KEY = "cacheLookup";
 
-
-
   /**
    * <p>
   * Specify the Primary Key field of this Entity, used to map the input
   * records
    * </p>
    */
-  public static final String CACHE_PRIMARY_KEY = "cachePk";
+  public static final String CACHE_PRIMARY_KEY = "cacheKey";
   /**
    * <p>
    * If true, a pre-existing cache is re-opened for read-only access.
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessor.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessor.java
index be3fe49..4541312 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessor.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/EntityProcessor.java
@@ -90,8 +90,7 @@ public abstract class EntityProcessor {
   public abstract Map<String, Object> nextModifiedParentRowKey();
 
   /**
-   * Invoked for each parent-row after the last row for this entity is processed. If this is the root-most
-   * entity, it will be called only once in the import, at the very end.
+   * Invoked for each entity at the very end of the import to do any needed cleanup tasks.
    * 
    */
   public abstract void destroy();
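
With the javadoc above corrected, the contract for custom processors is
that per-entity cleanup belongs in destroy(); a minimal sketch under that
contract (the class name is hypothetical; it assumes the
EntityProcessorBase defaults in the org.apache.solr.handler.dataimport
package plus commons-io):

    package org.apache.solr.handler.dataimport;

    import java.io.BufferedReader;
    import java.util.Map;

    import org.apache.commons.io.IOUtils;

    // Hypothetical processor: resources are released in destroy(), which
    // the import framework invokes for each entity to do cleanup.
    public class MyCleanupProcessor extends EntityProcessorBase {
      private BufferedReader reader;   // opened lazily by nextRow()

      @Override
      public Map<String, Object> nextRow() {
        // open the reader on first call, return one row per call,
        // and return null at end of input
        return null;
      }

      @Override
      public void destroy() {
        IOUtils.closeQuietly(reader);  // same pattern as LineEntityProcessor
        reader = null;
        super.destroy();
      }
    }
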
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/LineEntityProcessor.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/LineEntityProcessor.java
index 30e3663..5394ea6 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/LineEntityProcessor.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/LineEntityProcessor.java
@@ -116,7 +116,11 @@ public class LineEntityProcessor extends EntityProcessorBase {
              "Problem reading from input", exp);
       }
   
-      if (line == null) return null; // end of input       
+      // end of input
+      if (line == null) {
+        closeResources();
+        return null;
+      }
 
       // First scan whole line to see if we want it
       if (acceptLineRegex != null && ! acceptLineRegex.matcher(line).find()) continue;
@@ -127,13 +131,17 @@ public class LineEntityProcessor extends EntityProcessorBase {
       return row;
     }
   }
+  
+  public void closeResources() {
+    if (reader != null) {
+      IOUtils.closeQuietly(reader);
+    }
+    reader = null;
+  }
 
     @Override
     public void destroy() {
-      if (reader != null) {
-        IOUtils.closeQuietly(reader);
-      }
-      reader= null;
+      closeResources();
       super.destroy();
     }
 
diff --git a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SortedMapBackedCache.java b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SortedMapBackedCache.java
index 2700fd5..2239319 100644
--- a/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SortedMapBackedCache.java
+++ b/solr/contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SortedMapBackedCache.java
@@ -54,6 +54,10 @@ public class SortedMapBackedCache implements DIHCache {
       }
       pk = c.iterator().next();
     }
+    // Rows with null keys are not added.
+    if(pk==null) {
+      return;
+    }
     List<Map<String,Object>> thisKeysRecs = theMap.get(pk);
     if (thisKeysRecs == null) {
       thisKeysRecs = new ArrayList<Map<String,Object>>();
@@ -85,6 +89,9 @@ public class SortedMapBackedCache implements DIHCache {
   public void delete(Object key) {
     checkOpen(true);
     checkReadOnly();
+    if(key==null) {
+      return;
+    }
     theMap.remove(key);
   }
   
@@ -114,6 +121,9 @@ public class SortedMapBackedCache implements DIHCache {
   
   public Iterator<Map<String,Object>> iterator(Object key) {
     checkOpen(true);
+    if(key==null) {
+      return null;
+    }
     if(key instanceof Iterable<?>) {
       List<Map<String,Object>> vals = new ArrayList<Map<String,Object>>();
       Iterator<?> iter = ((Iterable<?>) key).iterator();
diff --git a/solr/contrib/dataimporthandler/src/test-files/dih/solr/conf/data-config-end-to-end.xml b/solr/contrib/dataimporthandler/src/test-files/dih/solr/conf/data-config-end-to-end.xml
index 6dc7e33..85eb6cd 100644
--- a/solr/contrib/dataimporthandler/src/test-files/dih/solr/conf/data-config-end-to-end.xml
+++ b/solr/contrib/dataimporthandler/src/test-files/dih/solr/conf/data-config-end-to-end.xml
@@ -12,7 +12,7 @@
       <field column="COUNTRY_CODE" sourceColName="COUNTRY_CODES" splitBy="," />
  
 <!-- 
- Instead of using 'cachePk'/'cacheLookup' as done below, we could have done:
+ Instead of using 'cacheKey'/'cacheLookup' as done below, we could have done:
   where="CODE=People.COUNTRY_CODE"
 --> 
       <entity 
@@ -20,7 +20,7 @@
         processor="SqlEntityProcessor"
         dataSource="hsqldb" 
         cacheImpl="SortedMapBackedCache"
-        cachePk="CODE"
+        cacheKey="CODE"
         cacheLookup="People.COUNTRY_CODE"
         
         query="SELECT CODE, COUNTRY_NAME FROM COUNTRIES"
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestEphemeralCache.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestEphemeralCache.java
index 540206c..1722a35 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestEphemeralCache.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestEphemeralCache.java
@@ -113,7 +113,7 @@ public class TestEphemeralCache extends AbstractDataImportHandlerTestCase {
       "       processor=\"SqlEntityProcessor\"" +
       "       cacheImpl=\"org.apache.solr.handler.dataimport.DestroyCountCache\"" +
       "       cacheName=\"CHILD\"" +
-      "       cachePk=\"id\"" +
+      "       cacheKey=\"id\"" +
       "       cacheLookup=\"PARENT.id\"" +
       "       fieldNames=\"id,         child1a_mult_s, child1b_s\"" +
       "       fieldTypes=\"BIGDECIMAL, STRING,         STRING\"" +
@@ -123,7 +123,7 @@ public class TestEphemeralCache extends AbstractDataImportHandlerTestCase {
       "       name=\"CHILD_2\"" +
       "       processor=\"SqlEntityProcessor\"" +
       "       cacheImpl=\"org.apache.solr.handler.dataimport.DestroyCountCache\"" +
-      "       cachePk=\"id\"" +
+      "       cacheKey=\"id\"" +
       "       cacheLookup=\"PARENT.id\"" +
       "       query=\"SELECT * FROM CHILD_2\"       " +
       "     />" +
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestFileListWithLineEntityProcessor.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestFileListWithLineEntityProcessor.java
new file mode 100644
index 0000000..7c348e6
--- /dev/null
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestFileListWithLineEntityProcessor.java
@@ -0,0 +1,66 @@
+package org.apache.solr.handler.dataimport;
+
+import java.io.File;
+
+import org.apache.solr.request.LocalSolrQueryRequest;
+import org.junit.BeforeClass;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class TestFileListWithLineEntityProcessor extends AbstractDataImportHandlerTestCase {
+  @BeforeClass
+  public static void beforeClass() throws Exception {
+    initCore("dataimport-solrconfig.xml", "dataimport-schema.xml");
+  }
+  
+  public void test() throws Exception {
+    File tmpdir = File.createTempFile("test", "tmp", TEMP_DIR);
+    tmpdir.delete();
+    tmpdir.mkdir();
+    tmpdir.deleteOnExit();
+    createFile(tmpdir, "a.txt", "a line one\na line two\na line three".getBytes(), false);
+    createFile(tmpdir, "b.txt", "b line one\nb line two".getBytes(), false);
+    createFile(tmpdir, "c.txt", "c line one\nc line two\nc line three\nc line four".getBytes(), false);
+    
+    String config = generateConfig(tmpdir);
+    LocalSolrQueryRequest request = lrf.makeRequest(
+        "command", "full-import", "dataConfig", config, "debug", "true",
+        "clean", "true", "commit", "true", "synchronous", "true", "indent", "true");
+    h.query("/dataimport", request);
+    
+    assertQ(req("*:*"), "//*[@numFound='9']");
+    assertQ(req("id:?\\ line\\ one"), "//*[@numFound='3']");
+    assertQ(req("id:a\\ line*"), "//*[@numFound='3']");
+    assertQ(req("id:b\\ line*"), "//*[@numFound='2']");
+    assertQ(req("id:c\\ line*"), "//*[@numFound='4']");    
+  }
+  
+  private String generateConfig(File dir) {
+    return
+    "<dataConfig> \n"+
+    "<dataSource type=\"FileDataSource\" encoding=\"UTF-8\" name=\"fds\"/> \n"+
+    "    <document> \n"+
+    "       <entity name=\"f\" processor=\"FileListEntityProcessor\" fileName=\".*[.]txt\" baseDir=\"" + dir.getAbsolutePath() + "\" recursive=\"false\" rootEntity=\"false\"  transformer=\"TemplateTransformer\"> \n" +
+    "             <entity name=\"jc\" processor=\"LineEntityProcessor\" url=\"${f.fileAbsolutePath}\" dataSource=\"fds\"  rootEntity=\"true\" transformer=\"TemplateTransformer\"> \n" +
+    "              <field column=\"rawLine\" name=\"id\" /> \n" +
+    "             </entity> \n"+              
+    "        </entity> \n"+
+    "    </document> \n"+
+    "</dataConfig> \n";
+  }  
+}
diff --git a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSortedMapBackedCache.java b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSortedMapBackedCache.java
index 6014fa7..7bf8c9d 100644
--- a/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSortedMapBackedCache.java
+++ b/solr/contrib/dataimporthandler/src/test/org/apache/solr/handler/dataimport/TestSortedMapBackedCache.java
@@ -20,6 +20,7 @@ package org.apache.solr.handler.dataimport;
 import java.math.BigDecimal;
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 
@@ -71,6 +72,37 @@ public class TestSortedMapBackedCache extends AbstractDIHCacheTestCase {
 			}
 		}
 	}
+
+	@Test
+	public void testNullKeys() throws Exception {
+		// A null key should just be ignored, not throw an exception.
+		DIHCache cache = null;
+		try {
+			cache = new SortedMapBackedCache();
+			Map<String, String> cacheProps = new HashMap<String, String>();
+			cacheProps.put(DIHCacheSupport.CACHE_PRIMARY_KEY, "a_id");
+			cache.open(getContext(cacheProps));
+
+			Map<String, Object> data = new HashMap<String, Object>();
+			data.put("a_id", null);
+			data.put("bogus", "data");
+			cache.add(data);
+
+			Iterator<Map<String, Object>> cacheIter = cache.iterator();
+			while (cacheIter.hasNext()) {
+				Assert.fail("cache should be empty.");
+			}
+			Assert.assertNull(cache.iterator(null));
+			cache.delete(null);
+		} finally {
+			try {
+				cache.destroy();
+			} catch (Exception ex) {
+				// ignore failures during cleanup
+			}
+		}
+	}
 
 	@Test
 	public void testCacheReopensWithUpdate() {
diff --git a/solr/contrib/extraction/CHANGES.txt b/solr/contrib/extraction/CHANGES.txt
index 329d68e..7795950 100644
--- a/solr/contrib/extraction/CHANGES.txt
+++ b/solr/contrib/extraction/CHANGES.txt
@@ -22,7 +22,9 @@ Tika Dependency
 
 Current Version: Tika 1.0 (released 2011-11-07)
 
-$Id$
+================== Release 3.6.2 ==================
+
+(No Changes)
 
 ================== Release 3.6.1 ==================
 
diff --git a/solr/contrib/langid/CHANGES.txt b/solr/contrib/langid/CHANGES.txt
index f76ed39..dac3572 100644
--- a/solr/contrib/langid/CHANGES.txt
+++ b/solr/contrib/langid/CHANGES.txt
@@ -5,7 +5,9 @@ This file describes changes to the SolrTika Language Identifier (contrib/langid)
 See http://wiki.apache.org/solr/LanguageDetection for details
 
 
-$Id$
+================== Release 3.6.2 ==================
+
+(No Changes)
 
 ================== Release 3.6.1 ==================
 
diff --git a/solr/contrib/uima/CHANGES.txt b/solr/contrib/uima/CHANGES.txt
index b04c18c..0753275 100644
--- a/solr/contrib/uima/CHANGES.txt
+++ b/solr/contrib/uima/CHANGES.txt
@@ -17,7 +17,9 @@ HMMTagger           v2.3.1
 AlchemyAPIAnnotator v2.3.1
 WhitespaceTokenizer v2.3.1
 
-$Id$
+==================  3.6.2  ==================
+
+(No Changes)
 
 ==================  3.6.1 ==================
 
diff --git a/solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParserPlugin.java b/solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParserPlugin.java
index 63fd46e..0398a79 100755
--- a/solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParserPlugin.java
+++ b/solr/core/src/java/org/apache/solr/search/ExtendedDismaxQParserPlugin.java
@@ -121,7 +121,10 @@ class ExtendedDismaxQParser extends QParser {
     SolrParams params = getParams();
     
     solrParams = SolrParams.wrapDefaults(localParams, params);
-
+    // Solr 4.0 sets the default to 0% if q.op=OR and 100% if q.op=AND;
+    // just go with the flow and use the 3.6 default of 100% here
+    final String minShouldMatch = solrParams.get(DisMaxParams.MM, "100%");
+
     userFields = new UserFields(U.parseFieldBoosts(solrParams.getParams(DMP.UF)));
     
     queryFields = SolrPluginUtils.parseFieldBoosts(solrParams.getParams(DisMaxParams.QF));
@@ -235,7 +238,9 @@ class ExtendedDismaxQParser extends QParser {
       // For correct lucene queries, turn off mm processing if there
       // were explicit operators (except for AND).
       boolean doMinMatched = (numOR + numNOT + numPluses + numMinuses) == 0;
-
+      // but mm always applies to the unstructured implicit boolean queries created by getFieldQuery
+      up.minShouldMatch = minShouldMatch;
+
       try {
         up.setRemoveStopFilter(!stopwords);
         up.exceptions = true;
@@ -252,7 +257,6 @@ class ExtendedDismaxQParser extends QParser {
       }
 
       if (parsedUserQuery != null && doMinMatched) {
-        String minShouldMatch = solrParams.get(DisMaxParams.MM, "100%");
         if (parsedUserQuery instanceof BooleanQuery) {
           SolrPluginUtils.setMinShouldMatch((BooleanQuery)parsedUserQuery, minShouldMatch);
         }
@@ -295,8 +299,7 @@ class ExtendedDismaxQParser extends QParser {
         parsedUserQuery = up.parse(escapedUserQuery);
 
         // Only do minimum-match logic
-        String minShouldMatch = solrParams.get(DisMaxParams.MM, "100%");
-
+
         if (parsedUserQuery instanceof BooleanQuery) {
           BooleanQuery t = new BooleanQuery();
           SolrPluginUtils.flattenBooleanQuery(t, (BooleanQuery)parsedUserQuery);
@@ -874,6 +877,7 @@ class ExtendedDismaxQParser extends QParser {
                               // used when constructing boosting part of query via sloppy phrases
     boolean exceptions;  //  allow exceptions to be thrown (for example on a missing field)
 
+    String minShouldMatch; // for inner boolean queries produced from a single fieldQuery
     ExtendedAnalyzer analyzer;
 
     /**
@@ -1117,6 +1121,18 @@ class ExtendedDismaxQParser extends QParser {
           case FIELD:  // fallthrough
           case PHRASE:
             Query query = super.getFieldQuery(field, val, type == QType.PHRASE);
+            // A BooleanQuery is only possible from getFieldQuery if it came from
+            // a single whitespace-separated term. In that case, check the coord
+            // factor on the query: if it is enabled, the clauses are not a set of
+            // synonyms but multiple terms split from one whitespace-separated
+            // term, so we must apply minShouldMatch here for it to work
+            // correctly with other features such as aliasing.
+            if (query instanceof BooleanQuery) {
+              BooleanQuery bq = (BooleanQuery) query;
+              if (!bq.isCoordDisabled()) {
+                SolrPluginUtils.setMinShouldMatch(bq, minShouldMatch);
+              }
+            }
             if (query instanceof PhraseQuery) {
               PhraseQuery pq = (PhraseQuery)query;
               if (minClauseSize > 1 && pq.getTerms().length < minClauseSize) return null;
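
The minShouldMatch spec set above ultimately lands on
BooleanQuery.setMinimumNumberShouldMatch via
SolrPluginUtils.setMinShouldMatch; a minimal sketch of that mapping for
the three-token case the new tests exercise (assuming the stock
Lucene/Solr 3.6 APIs):

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.BooleanClause;
    import org.apache.lucene.search.BooleanQuery;
    import org.apache.lucene.search.TermQuery;
    import org.apache.solr.util.SolrPluginUtils;

    // Three SHOULD clauses, as produced when the analyzer splits one
    // whitespace-separated CJK "word" into three tokens.
    public class MinShouldMatchSketch {
      public static void main(String[] args) {
        BooleanQuery bq = new BooleanQuery();
        bq.add(new TermQuery(new Term("standardtok", "大")), BooleanClause.Occur.SHOULD);
        bq.add(new TermQuery(new Term("standardtok", "亚")), BooleanClause.Occur.SHOULD);
        bq.add(new TermQuery(new Term("standardtok", "湾")), BooleanClause.Occur.SHOULD);
        SolrPluginUtils.setMinShouldMatch(bq, "67%");
        System.out.println(bq.getMinimumNumberShouldMatch()); // 2 of 3 must match
      }
    }
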
diff --git a/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java b/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
index f1dd86a..37cf671 100644
--- a/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
+++ b/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java
@@ -289,22 +289,24 @@ public class SolrIndexSearcher extends IndexSearcher implements SolrInfoMBean {
    * highlighted the index reader knows about.
    */
   public Collection<String> getStoredHighlightFieldNames() {
-    if (storedHighlightFieldNames == null) {
-      storedHighlightFieldNames = new LinkedList<String>();
-      for (String fieldName : fieldNames) {
-        try {
-          SchemaField field = schema.getField(fieldName);
-          if (field.stored() &&
-                  ((field.getType() instanceof org.apache.solr.schema.TextField) ||
-                  (field.getType() instanceof org.apache.solr.schema.StrField))) {
-            storedHighlightFieldNames.add(fieldName);
-          }
-        } catch (RuntimeException e) { // getField() throws a SolrException, but it arrives as a RuntimeException
+    synchronized (this) {
+      if (storedHighlightFieldNames == null) {
+        storedHighlightFieldNames = new LinkedList<String>();
+        for (String fieldName : fieldNames) {
+          try {
+            SchemaField field = schema.getField(fieldName);
+            if (field.stored() &&
+                ((field.getType() instanceof org.apache.solr.schema.TextField) ||
+                    (field.getType() instanceof org.apache.solr.schema.StrField))) {
+              storedHighlightFieldNames.add(fieldName);
+            }
+          } catch (RuntimeException e) { // getField() throws a SolrException, but it arrives as a RuntimeException
             log.warn("Field \"" + fieldName + "\" found in index, but not defined in schema.");
+          }
         }
       }
+      return storedHighlightFieldNames;
     }
-    return storedHighlightFieldNames;
   }
   //
   // Set default regenerators on filter and query caches if they don't have any
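
The synchronized block above closes the SOLR-3790 race: two concurrent
highlight requests could interleave in the lazy build of
storedHighlightFieldNames, one iterating the half-built list while the
other was still appending to it. A minimal sketch of the pattern now used
(names are illustrative, not the actual Solr fields):

    import java.util.LinkedList;
    import java.util.List;

    // Thread-safe lazy initialization: the list is built at most once,
    // and callers never observe it partially constructed.
    public class LazyFieldNames {
      private List<String> names;  // null until first use

      public synchronized List<String> get() {
        if (names == null) {
          names = new LinkedList<String>();
          names.add("title");      // stand-in for the schema scan
          names.add("body");
        }
        return names;
      }
    }
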
diff --git a/solr/core/src/test/org/apache/solr/search/TestExtendedDismaxParser.java b/solr/core/src/test/org/apache/solr/search/TestExtendedDismaxParser.java
index 166ad31..c4c2a8d 100755
--- a/solr/core/src/test/org/apache/solr/search/TestExtendedDismaxParser.java
+++ b/solr/core/src/test/org/apache/solr/search/TestExtendedDismaxParser.java
@@ -55,6 +55,16 @@ public class TestExtendedDismaxParser extends AbstractSolrTestCase {
     assertU(adoc("id", "49", "text_sw", "start the big apple end", "foo_i","-100"));
     assertU(adoc("id", "50", "text_sw", "start new big city end"));
     assertU(adoc("id", "51", "store",   "12.34,-56.78"));
+    assertU(adoc("id", "52", "text_sw", "tekna theou klethomen"));
+    assertU(adoc("id", "53", "text_sw", "nun tekna theou esmen"));
+    assertU(adoc("id", "54", "text_sw", "phanera estin ta tekna tou theou"));
+    assertU(adoc("id", "55", "standardtok", "大"));
+    assertU(adoc("id", "56", "standardtok", "大亚"));
+    assertU(adoc("id", "57", "standardtok", "大亚湾"));
+    assertU(adoc("id", "58", "HTMLstandardtok", "大"));
+    assertU(adoc("id", "59", "HTMLstandardtok", "大亚"));
+    assertU(adoc("id", "60", "HTMLstandardtok", "大亚湾"));
+    assertU(adoc("id", "61", "text_sw", "bazaaa")); // synonyms in an expansion group
     assertU(commit());
   }
   @Override
@@ -501,4 +511,151 @@ public class TestExtendedDismaxParser extends AbstractSolrTestCase {
             "//str[@name='id'][.='145']",
             "//str[@name='id'][.='146']");
   }
+  /**
+   * SOLR-3589: Edismax parser does not honor mm parameter if analyzer splits a token
+   */
+  public void testCJK() throws Exception {
+    assertQ("test cjk (disjunction)",
+        req("q", "大亚湾",
+            "qf", "standardtok",
+            "mm", "0%",
+            "defType", "edismax")
+        , "*[count(//doc)=3]");
+    assertQ("test cjk (minShouldMatch)",
+        req("q", "大亚湾",
+            "qf", "standardtok",
+            "mm", "67%",
+            "defType", "edismax")
+        , "*[count(//doc)=2]");
+    assertQ("test cjk (conjunction)",
+        req("q", "大亚湾",
+            "qf", "standardtok",
+            "mm", "100%",
+            "defType", "edismax")
+        , "*[count(//doc)=1]");
+  }
+  /** 
+   * test that minShouldMatch works with aliasing
+   * for implicit boolean queries
+   */
+  public void testCJKAliasing() throws Exception {
+    // single field
+    assertQ("test cjk (aliasing+disjunction)",
+        req("q", "myalias:大亚湾",
+            "f.myalias.qf", "standardtok",
+            "mm", "0%",
+            "defType", "edismax")
+        , "*[count(//doc)=3]");
+    assertQ("test cjk (aliasing+minShouldMatch)",
+        req("q", "myalias:大亚湾",
+            "f.myalias.qf", "standardtok",
+            "mm", "67%",
+            "defType", "edismax")
+        , "*[count(//doc)=2]");
+    assertQ("test cjk (aliasing+conjunction)",
+        req("q", "myalias:大亚湾",
+            "f.myalias.qf", "standardtok",
+            "mm", "100%",
+            "defType", "edismax")
+        , "*[count(//doc)=1]");
+    // multifield
+    assertQ("test cjk (aliasing+disjunction)",
+        req("q", "myalias:大亚湾",
+            "f.myalias.qf", "standardtok HTMLstandardtok",
+            "mm", "0%",
+            "defType", "edismax")
+        , "*[count(//doc)=6]");
+    assertQ("test cjk (aliasing+minShouldMatch)",
+        req("q", "myalias:大亚湾",
+            "f.myalias.qf", "standardtok HTMLstandardtok",
+            "mm", "67%",
+            "defType", "edismax")
+        , "*[count(//doc)=4]");
+    assertQ("test cjk (aliasing+conjunction)",
+        req("q", "myalias:大亚湾",
+            "f.myalias.qf", "standardtok HTMLstandardtok",
+            "mm", "100%",
+            "defType", "edismax")
+        , "*[count(//doc)=2]");
+  }
+  
+  /** Test that we apply boosts correctly */
+  public void testCJKBoosts() throws Exception {
+    assertQ("test cjk (disjunction)",
+        req("q", "大亚湾",
+            "qf", "standardtok^2 HTMLstandardtok",
+            "mm", "0%",
+            "defType", "edismax")
+        , "*[count(//doc)=6]", "//result/doc[1]/str[@name='id'][.='57']");
+    assertQ("test cjk (minShouldMatch)",
+        req("q", "大亚湾",
+            "qf", "standardtok^2 HTMLstandardtok",
+            "mm", "67%",
+            "defType", "edismax")
+        , "*[count(//doc)=4]", "//result/doc[1]/str[@name='id'][.='57']");
+    assertQ("test cjk (conjunction)",
+        req("q", "大亚湾",
+            "qf", "standardtok^2 HTMLstandardtok",
+            "mm", "100%",
+            "defType", "edismax")
+        , "*[count(//doc)=2]", "//result/doc[1]/str[@name='id'][.='57']");
+    
+    // now boost the other field
+    assertQ("test cjk (disjunction)",
+        req("q", "大亚湾",
+            "qf", "standardtok HTMLstandardtok^2",
+            "mm", "0%",
+            "defType", "edismax")
+        , "*[count(//doc)=6]", "//result/doc[1]/str[@name='id'][.='60']");
+    assertQ("test cjk (minShouldMatch)",
+        req("q", "大亚湾",
+            "qf", "standardtok HTMLstandardtok^2",
+            "mm", "67%",
+            "defType", "edismax")
+        , "*[count(//doc)=4]", "//result/doc[1]/str[@name='id'][.='60']");
+    assertQ("test cjk (conjunction)",
+        req("q", "大亚湾",
+            "qf", "standardtok HTMLstandardtok^2",
+            "mm", "100%",
+            "defType", "edismax")
+        , "*[count(//doc)=2]", "//result/doc[1]/str[@name='id'][.='60']");
+  }
+  
+  /** always apply minShouldMatch to the inner BooleanQueries
+   *  created from whitespace, as these are never structured Lucene queries
+   *  but only come from unstructured text */
+  public void testCJKStructured() throws Exception {
+    assertQ("test cjk (disjunction)",
+        req("q", "大亚湾 OR bogus",
+            "qf", "standardtok",
+            "mm", "0%",
+            "defType", "edismax")
+        , "*[count(//doc)=3]");
+    assertQ("test cjk (minShouldMatch)",
+        req("q", "大亚湾 OR bogus",
+            "qf", "standardtok",
+            "mm", "67%",
+            "defType", "edismax")
+        , "*[count(//doc)=2]");
+    assertQ("test cjk (conjunction)",
+        req("q", "大亚湾 OR bogus",
+            "qf", "standardtok",
+            "mm", "100%",
+            "defType", "edismax")
+        , "*[count(//doc)=1]");
+  }
+  
+  /**
+   * Test that we don't apply minShouldMatch to the inner boolean queries
+   * when there are synonyms (these are indicated by a disabled coord factor)
+   */
+  public void testSynonyms() throws Exception {
+    // document only contains bazaaa, but should still match.
+    assertQ("test synonyms",
+        req("q", "fooaaa",
+            "qf", "text_sw",
+            "mm", "100%",
+            "defType", "edismax")
+        , "*[count(//doc)=1]");
+  }
 }
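
A worked example of the counts asserted in testCJK above: the query 大亚湾
analyzes to three SHOULD terms, and docs 55-57 contain one, two, and three
of them respectively. A percentage spec is rounded down to a clause count;
a sketch of that arithmetic (assumed to match the rounding in
SolrPluginUtils.calculateMinShouldMatch):

    // mm=67% over 3 optional clauses: (int)(3 * 67 / 100f) = 2, so only
    // docs containing at least two of {大, 亚, 湾} match (ids 56 and 57).
    public class MmArithmetic {
      public static void main(String[] args) {
        int optionalClauses = 3;
        int percent = 67;
        int min = (int) (optionalClauses * percent / 100f);
        System.out.println(min);  // 2
      }
    }
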

-- 
lucene-solr packaging


