
Commit 553f75c

Ensure that searcher.getReader() is used when using Lucene test searcher (elastic#98932)

Related to: elastic#98110

The Lucene test searcher will randomly wrap the passed reader. That means
any usage that mixes the two (the searcher and the previously passed
reader) can cause tests to fail.

Some of these locations may not strictly need searcher.getReader(), but I
just wanted to make sure we didn't have flaky tests.

closes: elastic#98925
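To illustrate the failure mode: doc IDs returned by a searcher built via LuceneTestCase.newSearcher are only guaranteed to be valid against searcher.getIndexReader(), because newSearcher may have wrapped the reader it was given. A minimal sketch, not code from this commit — it assumes a LuceneTestCase subclass (where newSearcher is inherited) plus hypothetical `writer` and `query` arguments standing in for whatever the test already has:

    import java.io.IOException;

    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;

    void showReaderMixUp(IndexWriter writer, Query query) throws IOException {
        try (IndexReader reader = DirectoryReader.open(writer)) {
            // newSearcher comes from LuceneTestCase and may randomly wrap `reader`.
            IndexSearcher searcher = newSearcher(reader);
            int docId = searcher.search(query, 1).scoreDocs[0].doc;

            // Flaky: `docId` belongs to the (possibly wrapped) searcher reader,
            // but the lookup goes through the original, unwrapped reader.
            // reader.termVectors().get(docId);

            // Safe: resolve the doc ID against the searcher's own reader.
            searcher.getIndexReader().termVectors().get(docId);
        }
    }

Once wrapping happens the two readers are different objects, which is why tests that mix them fail only intermittently: only the random seeds that wrap expose the bug.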
benwtrent authored Aug 29, 2023
1 parent fa0f531 commit 553f75c
Showing 7 changed files with 17 additions and 15 deletions.
File 1 of 7
@@ -109,7 +109,7 @@ private void writeEmptyTermVector(TermVectorsResponse outResponse) throws IOException
 TopDocs search = s.search(new TermQuery(new Term("id", "abc")), 1);
 ScoreDoc[] scoreDocs = search.scoreDocs;
 int doc = scoreDocs[0].doc;
-Fields fields = dr.getTermVectors(doc);
+Fields fields = s.getIndexReader().termVectors().get(doc);
 EnumSet<Flag> flags = EnumSet.of(Flag.Positions, Flag.Offsets);
 outResponse.setFields(fields, null, flags, fields);
 outResponse.setExists(true);
@@ -144,7 +144,7 @@ private void writeStandardTermVector(TermVectorsResponse outResponse) throws IOException
 TopDocs search = s.search(new TermQuery(new Term("id", "abc")), 1);
 ScoreDoc[] scoreDocs = search.scoreDocs;
 int doc = scoreDocs[0].doc;
-Fields termVectors = dr.getTermVectors(doc);
+Fields termVectors = s.getIndexReader().termVectors().get(doc);
 EnumSet<Flag> flags = EnumSet.of(Flag.Positions, Flag.Offsets);
 outResponse.setFields(termVectors, null, flags, termVectors);
 dr.close();
File 2 of 7
@@ -439,8 +439,9 @@ public void testAsSequentialBitsUsesRandomAccess() throws IOException {
 w.addDocument(doc);
 }
 w.forceMerge(1);
-try (IndexReader reader = DirectoryReader.open(w)) {
-IndexSearcher searcher = newSearcher(reader);
+try (IndexReader indexReader = DirectoryReader.open(w)) {
+IndexSearcher searcher = newSearcher(indexReader);
+IndexReader reader = searcher.getIndexReader();
 searcher.setQueryCache(null);
 Query query = new IndexOrDocValuesQuery(new UnsupportedQuery(), NumericDocValuesField.newSlowRangeQuery("foo", 3L, 5L));
 Weight weight = searcher.createWeight(query, ScoreMode.COMPLETE_NO_SCORES, 1f);
File 3 of 7
@@ -57,7 +57,7 @@ public void testVectorHighlighter() throws Exception {
 FastVectorHighlighter highlighter = new FastVectorHighlighter();
 String fragment = highlighter.getBestFragment(
 highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))),
-reader,
+searcher.getIndexReader(),
 topDocs.scoreDocs[0].doc,
 "content",
 30
@@ -79,8 +79,9 @@ public void testVectorHighlighterPrefixQuery() throws Exception {
 document.add(new Field("content", "the big bad dog", vectorsType));
 indexWriter.addDocument(document);

-IndexReader reader = DirectoryReader.open(indexWriter);
-IndexSearcher searcher = newSearcher(reader);
+IndexReader indexReader = DirectoryReader.open(indexWriter);
+IndexSearcher searcher = newSearcher(indexReader);
+IndexReader reader = searcher.getIndexReader();
 TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1);

 assertThat(topDocs.totalHits.value, equalTo(1L));
@@ -143,7 +144,7 @@ public void testVectorHighlighterNoStore() throws Exception {
 FastVectorHighlighter highlighter = new FastVectorHighlighter();
 String fragment = highlighter.getBestFragment(
 highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))),
-reader,
+searcher.getIndexReader(),
 topDocs.scoreDocs[0].doc,
 "content",
 30
@@ -169,7 +170,7 @@ public void testVectorHighlighterNoTermVector() throws Exception {
 FastVectorHighlighter highlighter = new FastVectorHighlighter();
 String fragment = highlighter.getBestFragment(
 highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))),
-reader,
+searcher.getIndexReader(),
 topDocs.scoreDocs[0].doc,
 "content",
 30
File 4 of 7
@@ -697,7 +697,7 @@ public void testPropagatesApproximations() throws IOException {
 FunctionScoreQuery fsq = new FunctionScoreQuery(query, null, Float.POSITIVE_INFINITY);
 for (org.apache.lucene.search.ScoreMode scoreMode : org.apache.lucene.search.ScoreMode.values()) {
 Weight weight = searcher.createWeight(fsq, scoreMode, 1f);
-Scorer scorer = weight.scorer(reader.leaves().get(0));
+Scorer scorer = weight.scorer(searcher.getIndexReader().leaves().get(0));
 assertNotNull(scorer.twoPhaseIterator());
 }
 }
File 5 of 7
@@ -99,7 +99,7 @@ public void testDuel() throws Exception {
 MultiValueMode sortMode = randomFrom(Arrays.asList(MultiValueMode.MIN, MultiValueMode.MAX));
 DirectoryReader reader = DirectoryReader.open(writer);
 reader = ElasticsearchDirectoryReader.wrap(reader, new ShardId(indexService.index(), 0));
-IndexSearcher searcher = newSearcher(reader);
+IndexSearcher searcher = newSearcher(reader, false);
 PagedBytesIndexFieldData indexFieldData1 = getForField("f");
 IndexFieldData<?> indexFieldData2 = NoOrdinalsStringFieldDataTests.hideOrdinals(indexFieldData1);
 final String missingValue = randomBoolean() ? null : TestUtil.randomSimpleString(random(), 2);
@@ -291,7 +291,7 @@ public void testNestedSorting() throws Exception {
 MultiValueMode sortMode = MultiValueMode.MIN;
 DirectoryReader reader = DirectoryReader.open(writer);
 reader = ElasticsearchDirectoryReader.wrap(reader, new ShardId(indexService.index(), 0));
-IndexSearcher searcher = newSearcher(reader);
+IndexSearcher searcher = newSearcher(reader, false);
 PagedBytesIndexFieldData indexFieldData = getForField("field2");
 Query parentFilter = new TermQuery(new Term("_nested_path", "parent"));
 Query childFilter = Queries.not(parentFilter);
@@ -612,7 +612,7 @@ public void testMultiLevelNestedSorting() throws IOException {
 }
 DirectoryReader reader = DirectoryReader.open(writer);
 reader = ElasticsearchDirectoryReader.wrap(reader, new ShardId(indexService.index(), 0));
-IndexSearcher searcher = newSearcher(reader);
+IndexSearcher searcher = newSearcher(reader, false);
 SearchExecutionContext searchExecutionContext = indexService.newSearchExecutionContext(0, 0, searcher, () -> 0L, null, emptyMap());

 FieldSortBuilder sortBuilder = new FieldSortBuilder("chapters.paragraphs.word_count");
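The three hunks in this file take the commit's other route: these tests need the searcher to keep the exact ElasticsearchDirectoryReader they just built, so instead of rerouting lookups through searcher.getIndexReader() they opt out of the random wrapping. A sketch of that variant, assuming the boolean second argument of LuceneTestCase.newSearcher is what disables the wrapping:

    DirectoryReader reader = ElasticsearchDirectoryReader.wrap(
        DirectoryReader.open(writer),
        new ShardId(indexService.index(), 0)
    );
    // With wrapping disabled, `reader` and searcher.getIndexReader() are the
    // same object, so code that mixes the two stays safe in these tests.
    IndexSearcher searcher = newSearcher(reader, false);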
File 6 of 7
@@ -155,7 +155,7 @@ void assertSplit(Directory dir, IndexMetadata metadata, int targetShardId, boolean …
 ScoreMode.COMPLETE_NO_SCORES,
 1f
 );
-final List<LeafReaderContext> leaves = reader.leaves();
+final List<LeafReaderContext> leaves = searcher.getIndexReader().leaves();
 for (final LeafReaderContext ctx : leaves) {
 Scorer scorer = splitWeight.scorer(ctx);
 DocIdSetIterator iterator = scorer.iterator();
File 7 of 7
@@ -137,7 +137,7 @@ public void testIntersectScorerAndRoleBits() throws Exception {
 1f
 );

-LeafReaderContext leaf = directoryReader.leaves().get(0);
+LeafReaderContext leaf = searcher.getIndexReader().leaves().get(0);

 CombinedBitSet bitSet = new CombinedBitSet(query(leaf, "field1", "value1"), leaf.reader().getLiveDocs());
 LeafCollector leafCollector = new LeafBucketCollector() {
