Skip to content

Commit

Permalink
Fix bug in sort optimization (#1903) (#1915)
Browse files Browse the repository at this point in the history
Fix a bug in how an iterator with skipping functionality
advances and produces docs

Relates to #1725
Backport for #1903
  • Loading branch information
mayya-sharipova authored Sep 23, 2020
1 parent b13945d commit da03351
Show file tree
Hide file tree
Showing 4 changed files with 76 additions and 35 deletions.
6 changes: 3 additions & 3 deletions lucene/core/src/java/org/apache/lucene/search/Weight.java
Original file line number Diff line number Diff line change
Expand Up @@ -220,13 +220,13 @@ public int score(LeafCollector collector, Bits acceptDocs, int min, int max) thr
// if possible filter scorerIterator to keep only competitive docs as defined by collector
DocIdSetIterator filteredIterator = collectorIterator == null ? scorerIterator :
ConjunctionDISI.intersectIterators(Arrays.asList(scorerIterator, collectorIterator));
if (scorer.docID() == -1 && min == 0 && max == DocIdSetIterator.NO_MORE_DOCS) {
if (filteredIterator.docID() == -1 && min == 0 && max == DocIdSetIterator.NO_MORE_DOCS) {
scoreAll(collector, filteredIterator, twoPhase, acceptDocs);
return DocIdSetIterator.NO_MORE_DOCS;
} else {
int doc = scorer.docID();
int doc = filteredIterator.docID();
if (doc < min) {
doc = scorerIterator.advance(min);
doc = filteredIterator.advance(min);
}
return scoreRange(collector, filteredIterator, twoPhase, acceptDocs, doc, max);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -133,16 +133,14 @@ public DocIdSetIterator competitiveIterator() {
return null;
} else {
return new DocIdSetIterator() {
private int doc;

@Override
public int nextDoc() throws IOException {
return doc = competitiveIterator.nextDoc();
return competitiveIterator.nextDoc();
}

@Override
public int docID() {
return doc;
return competitiveIterator.docID();
}

@Override
Expand All @@ -152,7 +150,7 @@ public long cost() {

@Override
public int advance(int target) throws IOException {
return doc = competitiveIterator.advance(target);
return competitiveIterator.advance(target);
}
};
}
Expand All @@ -176,7 +174,7 @@ private void updateIterator() {
if (docBase + maxDoc <= minDoc) {
competitiveIterator = DocIdSetIterator.empty(); // skip this segment
} else {
int segmentMinDoc = Math.max(0, minDoc - docBase);
int segmentMinDoc = Math.max(competitiveIterator.docID(), minDoc - docBase);
competitiveIterator = new MinDocIterator(segmentMinDoc, maxDoc);
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -228,16 +228,14 @@ public PointValues.Relation compare(byte[] minPackedValue, byte[] maxPackedValue
public DocIdSetIterator competitiveIterator() {
if (enableSkipping == false) return null;
return new DocIdSetIterator() {
private int doc;

@Override
public int nextDoc() throws IOException {
return doc = competitiveIterator.nextDoc();
return competitiveIterator.nextDoc();
}

@Override
public int docID() {
return doc;
return competitiveIterator.docID();
}

@Override
Expand All @@ -247,7 +245,7 @@ public long cost() {

@Override
public int advance(int target) throws IOException {
return doc = competitiveIterator.advance(target);
return competitiveIterator.advance(target);
}
};
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,7 @@ public void testLongSortOptimization() throws IOException {
if (i == 7000) writer.flush(); // two segments
}
final IndexReader reader = DirectoryReader.open(writer);
writer.close();
IndexSearcher searcher = new IndexSearcher(reader);
final SortField sortField = new SortField("my_field", SortField.Type.LONG);
sortField.setCanUsePoints();
Expand Down Expand Up @@ -121,7 +122,6 @@ public void testLongSortOptimization() throws IOException {
assertEquals(topDocs.totalHits.value, numDocs); // assert that all documents were collected => optimization was not run
}

writer.close();
reader.close();
dir.close();
}
Expand All @@ -142,6 +142,7 @@ public void testLongSortOptimizationOnFieldNotIndexedWithPoints() throws IOExcep
writer.addDocument(doc);
}
final IndexReader reader = DirectoryReader.open(writer);
writer.close();
IndexSearcher searcher = new IndexSearcher(reader);
final SortField sortField = new SortField("my_field", SortField.Type.LONG);
sortField.setCanUsePoints();
Expand All @@ -159,7 +160,6 @@ public void testLongSortOptimizationOnFieldNotIndexedWithPoints() throws IOExcep
}
assertEquals(topDocs.totalHits.value, numDocs); // assert that all documents were collected => optimization was not run

writer.close();
reader.close();
dir.close();
}
Expand All @@ -179,6 +179,7 @@ public void testSortOptimizationWithMissingValues() throws IOException {
if (i == 7000) writer.flush(); // two segments
}
final IndexReader reader = DirectoryReader.open(writer);
writer.close();
IndexSearcher searcher = new IndexSearcher(reader);
final int numHits = 3;
final int totalHitsThreshold = 3;
Expand Down Expand Up @@ -206,7 +207,6 @@ public void testSortOptimizationWithMissingValues() throws IOException {
assertTrue(topDocs.totalHits.value < numDocs); // assert that some docs were skipped => optimization was run
}

writer.close();
reader.close();
dir.close();
}
Expand All @@ -224,6 +224,7 @@ public void testSortOptimizationEqualValues() throws IOException {
if (i == 7000) writer.flush(); // two segments
}
final IndexReader reader = DirectoryReader.open(writer);
writer.close();
IndexSearcher searcher = new IndexSearcher(reader);
final int numHits = 3;
final int totalHitsThreshold = 3;
Expand Down Expand Up @@ -278,7 +279,6 @@ public void testSortOptimizationEqualValues() throws IOException {
assertEquals(topDocs.totalHits.value, numDocs); // assert that all documents were collected => optimization was not run
}

writer.close();
reader.close();
dir.close();
}
Expand All @@ -296,6 +296,7 @@ public void testFloatSortOptimization() throws IOException {
writer.addDocument(doc);
}
final IndexReader reader = DirectoryReader.open(writer);
writer.close();
IndexSearcher searcher = new IndexSearcher(reader);
final SortField sortField = new SortField("my_field", SortField.Type.FLOAT);
sortField.setCanUsePoints();
Expand All @@ -316,7 +317,6 @@ public void testFloatSortOptimization() throws IOException {
assertTrue(topDocs.totalHits.value < numDocs);
}

writer.close();
reader.close();
dir.close();
}
Expand All @@ -329,14 +329,15 @@ public void testDocSortOptimizationWithAfter() throws IOException {
final Document doc = new Document();
writer.addDocument(doc);
if ((i > 0) && (i % 50 == 0)) {
writer.commit();
writer.flush();
}
}
final IndexReader reader = DirectoryReader.open(writer);
IndexSearcher searcher = new IndexSearcher(reader);
final int numHits = 3;
final int totalHitsThreshold = 3;
final int[] searchAfters = {10, 140, numDocs - 4};
writer.close();
IndexSearcher searcher = newSearcher(reader);
final int numHits = 10;
final int totalHitsThreshold = 10;
final int[] searchAfters = {3, 10, numDocs - 10};
for (int searchAfter : searchAfters) {
// sort by _doc with search after should trigger optimization
{
Expand All @@ -345,14 +346,15 @@ public void testDocSortOptimizationWithAfter() throws IOException {
final TopFieldCollector collector = TopFieldCollector.create(sort, numHits, after, totalHitsThreshold);
searcher.search(new MatchAllDocsQuery(), collector);
TopDocs topDocs = collector.topDocs();
assertEquals(numHits, topDocs.scoreDocs.length);
for (int i = 0; i < numHits; i++) {
int expNumHits = (searchAfter >= (numDocs - numHits)) ? (numDocs - searchAfter - 1) : numHits;
assertEquals(expNumHits, topDocs.scoreDocs.length);
for (int i = 0; i < topDocs.scoreDocs.length; i++) {
int expectedDocID = searchAfter + 1 + i;
assertEquals(expectedDocID, topDocs.scoreDocs[i].doc);
}
assertTrue(collector.isEarlyTerminated());
// check that very few docs were collected
assertTrue(topDocs.totalHits.value < 10);
assertTrue(topDocs.totalHits.value < numDocs);
}

// sort by _doc + _score with search after should trigger optimization
Expand All @@ -362,14 +364,15 @@ public void testDocSortOptimizationWithAfter() throws IOException {
final TopFieldCollector collector = TopFieldCollector.create(sort, numHits, after, totalHitsThreshold);
searcher.search(new MatchAllDocsQuery(), collector);
TopDocs topDocs = collector.topDocs();
assertEquals(numHits, topDocs.scoreDocs.length);
for (int i = 0; i < numHits; i++) {
int expNumHits = (searchAfter >= (numDocs - numHits)) ? (numDocs - searchAfter - 1) : numHits;
assertEquals(expNumHits, topDocs.scoreDocs.length);
for (int i = 0; i < topDocs.scoreDocs.length; i++) {
int expectedDocID = searchAfter + 1 + i;
assertEquals(expectedDocID, topDocs.scoreDocs[i].doc);
}
assertTrue(collector.isEarlyTerminated());
// assert that very few docs were collected
assertTrue(topDocs.totalHits.value < 10);
assertTrue(topDocs.totalHits.value < numDocs);
}

// sort by _doc desc should not trigger optimization
Expand All @@ -379,8 +382,9 @@ public void testDocSortOptimizationWithAfter() throws IOException {
final TopFieldCollector collector = TopFieldCollector.create(sort, numHits, after, totalHitsThreshold);
searcher.search(new MatchAllDocsQuery(), collector);
TopDocs topDocs = collector.topDocs();
assertEquals(numHits, topDocs.scoreDocs.length);
for (int i = 0; i < numHits; i++) {
int expNumHits = (searchAfter < numHits) ? searchAfter : numHits;
assertEquals(expNumHits, topDocs.scoreDocs.length);
for (int i = 0; i < topDocs.scoreDocs.length; i++) {
int expectedDocID = searchAfter - 1 - i;
assertEquals(expectedDocID, topDocs.scoreDocs[i].doc);
}
Expand All @@ -389,7 +393,6 @@ public void testDocSortOptimizationWithAfter() throws IOException {
}
}

writer.close();
reader.close();
dir.close();
}
Expand All @@ -407,12 +410,13 @@ public void testDocSortOptimization() throws IOException {
doc.add(new StringField("tf", "seg" + seg, Field.Store.YES));
writer.addDocument(doc);
if ((i > 0) && (i % 50 == 0)) {
writer.commit();
writer.flush();
seg++;
}
}
final IndexReader reader = DirectoryReader.open(writer);
IndexSearcher searcher = new IndexSearcher(reader);
writer.close();
IndexSearcher searcher = newSearcher(reader);;
final int numHits = 3;
final int totalHitsThreshold = 3;
final Sort sort = new Sort(FIELD_DOC);
Expand Down Expand Up @@ -450,7 +454,48 @@ public void testDocSortOptimization() throws IOException {
assertTrue(topDocs.totalHits.value < 10); // assert that very few docs were collected
}

reader.close();
dir.close();
}

/**
 * Test that sorting on _doc works correctly.
 * This test goes through DefaultBulkSorter::scoreRange, where scorerIterator is BitSetIterator.
 * As a conjunction of this BitSetIterator with DocComparator's iterator, we get BitSetConjunctionDISI.
 * BitSetConjunctionDISI advances based on the DocComparator's iterator, and doesn't consider
 * that its BitSetIterator may have advanced past a certain doc.
 */
public void testDocSort() throws IOException {
final Directory dir = newDirectory();
final IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig());
final int numDocs = 4;
for (int i = 0; i < numDocs; ++i) {
final Document doc = new Document();
doc.add(new StringField("id", "id" + i, Field.Store.NO));
// Only docs 0 and 1 get the point field "lf", so the point query below
// matches exactly those two docs.
if (i < 2) {
doc.add(new LongPoint("lf", 1));
}
writer.addDocument(doc);
}
final IndexReader reader = DirectoryReader.open(writer);
writer.close();

IndexSearcher searcher = newSearcher(reader);
// Disable the query cache so the search actually exercises the
// scorer/iterator path under test rather than a cached bit set.
searcher.setQueryCache(null);
final int numHits = 10;
final int totalHitsThreshold = 10;
final Sort sort = new Sort(FIELD_DOC);

{
final TopFieldCollector collector = TopFieldCollector.create(sort, numHits, null, totalHitsThreshold);
// MUST point query on "lf" (matches docs 0 and 1) combined with a
// MUST_NOT term clause; the conjunction of their iterators is the
// code path the javadoc above describes.
BooleanQuery.Builder bq = new BooleanQuery.Builder();
bq.add(LongPoint.newExactQuery("lf", 1), BooleanClause.Occur.MUST);
bq.add(new TermQuery(new Term("id", "id3")), BooleanClause.Occur.MUST_NOT);
searcher.search(bq.build(), collector);
TopDocs topDocs = collector.topDocs();
// Docs 0 and 1 match the point query and are not excluded; a buggy
// iterator conjunction would return fewer than 2 hits.
assertEquals(2, topDocs.scoreDocs.length);
}

reader.close();
dir.close();
}
Expand Down

0 comments on commit da03351

Please sign in to comment.