
Commit

Add more logging for get path
rayshrey committed Jan 10, 2025
1 parent f0a7e9d commit 29a3569
Showing 2 changed files with 26 additions and 4 deletions.
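
The change itself is narrow: each SortedNumericDocValues#nextValue() call on the get path is wrapped in a try/catch that logs the document id, the reported doc-value count, and the loop index before rethrowing, and checkpoint lines mark which stored-fields reader the fetch phase picked. A minimal standalone sketch of that wrap-log-rethrow pattern follows; the class and method names are illustrative, and it uses Log4j parameterized logging rather than the diff's string concatenation:

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.lucene.index.SortedNumericDocValues;

    import java.io.IOException;

    class SndvDebugRead { // hypothetical helper, not part of this commit
        private static final Logger logger = LogManager.getLogger(SndvDebugRead.class);

        // Reads every value for one document, logging diagnostic context before
        // rethrowing so the original failure still propagates.
        static long[] readAll(SortedNumericDocValues sndv, int docId) throws IOException {
            if (sndv.advanceExact(docId) == false) {
                return new long[0]; // no values indexed for this doc
            }
            int size = sndv.docValueCount();
            long[] vals = new long[size];
            for (int i = 0; i < size; i++) {
                try {
                    vals[i] = sndv.nextValue();
                } catch (Exception e) {
                    logger.info("Exception while reading value from SNDV - Doc Id : {}, DocValueCountSize : {}, i : {}", docId, size, i, e);
                    throw e;
                }
            }
            return vals;
        }
    }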
27 changes: 23 additions & 4 deletions server/src/main/java/org/opensearch/index/get/ShardGetService.java
@@ -32,6 +32,8 @@
 
 package org.opensearch.index.get;
 
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
 import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.FieldInfo;
 import org.apache.lucene.index.IndexOptions;
@@ -87,6 +89,7 @@
 import org.opensearch.index.shard.AbstractIndexShardComponent;
 import org.opensearch.index.shard.IndexShard;
 import org.opensearch.search.DocValueFormat;
+import org.opensearch.search.fetch.FetchPhase;
 import org.opensearch.search.fetch.subphase.FetchSourceContext;
 
 import java.io.IOException;
@@ -500,7 +503,7 @@ private static FieldsVisitor buildFieldsVisitors(String[] fields, FetchSourceCon
         return new CustomFieldsVisitor(Sets.newHashSet(fields), fetchSourceContext.fetchSource());
     }
 
-    private static Map<String, Object> buildUsingDocValues(int docId, LeafReader reader, MapperService mapperService, IndexShard indexShard) throws IOException {
+    private Map<String, Object> buildUsingDocValues(int docId, LeafReader reader, MapperService mapperService, IndexShard indexShard) throws IOException {
         Map<String, Object> docValues = new HashMap<>();
         for (Mapper mapper: mapperService.documentMapper().mappers()) {
             if (mapper instanceof MetadataFieldMapper) {
@@ -543,7 +546,12 @@ private static Map<String, Object> buildUsingDocValues(int docId, LeafReader rea
                     int size = doubleValues.docValueCount();
                     double[] vals = new double[size];
                     for (int i = 0; i < size; i++) {
-                        vals[i] = doubleValues.nextValue();
+                        try {
+                            vals[i] = doubleValues.nextValue();
+                        } catch (Exception e) {
+                            logger.info("Exception while reading value from SNDV\nDoc Id : " + docId + ", DocValueCountSize : " + size + ", i : " + i);
+                            throw e;
+                        }
                     }
                     if (size > 1) {
                         docValues.put(fieldName, vals);
@@ -559,7 +567,12 @@ private static Map<String, Object> buildUsingDocValues(int docId, LeafReader rea
                     int size = sndv.docValueCount();
                     long[] vals = new long[size];
                     for (int i = 0; i < size; i++) {
-                        vals[i] = sndv.nextValue();
+                        try {
+                            vals[i] = sndv.nextValue();
+                        } catch (Exception e) {
+                            logger.info("Exception while reading value from SNDV\nDoc Id : " + docId + ", DocValueCountSize : " + size + ", i : " + i);
+                            throw e;
+                        }
                     }
                     if (size > 1) {
                         docValues.put(fieldName, vals);
@@ -569,12 +582,18 @@ private static Map<String, Object> buildUsingDocValues(int docId, LeafReader rea
                         }
                     }
                 } else if (fieldMapper instanceof DateFieldMapper) {
+                    logger.info("Get path -> Doc Id : " + docId + ", field : " + fieldName);
                     DateFormatter dateFormatter = ((DateFieldMapper) fieldMapper).fieldType().dateTimeFormatter();
                     if (sndv.advanceExact(docId)) {
                         int size = sndv.docValueCount();
                         String[] vals = new String[size];
                         for (int i = 0; i < size; i++) {
-                            vals[i] = dateFormatter.formatMillis(sndv.nextValue());
+                            try {
+                                vals[i] = dateFormatter.formatMillis(sndv.nextValue());
+                            } catch (Exception e) {
+                                logger.info("Exception while reading value from SNDV\nDoc Id : " + docId + ", DocValueCountSize : " + size + ", i : " + i);
+                                throw e;
+                            }
                         }
                         if (size > 1) {
                             docValues.put(fieldName, vals);

3 changes: 3 additions & 0 deletions server/src/main/java/org/opensearch/search/fetch/FetchPhase.java
@@ -176,13 +176,15 @@ public void execute(SearchContext context) {
             if (currentReaderContext.reader() instanceof SequentialStoredFieldsLeafReader
                 && hasSequentialDocs
                 && docs.length >= 10) {
+                logger.info("Reader checkpoint - SequentialStoredFieldsLeafReader");
                 // All the docs to fetch are adjacent but Lucene stored fields are optimized
                 // for random access and don't optimize for sequential access - except for merging.
                 // So we do a little hack here and pretend we're going to do merges in order to
                 // get better sequential access.
                 SequentialStoredFieldsLeafReader lf = (SequentialStoredFieldsLeafReader) currentReaderContext.reader();
                 fieldReader = lf.getSequentialStoredFieldsReader()::document;
             } else {
+                logger.info("Reader checkpoint - Normal path - Not SequentialStoredFieldsLeafReader");
                 fieldReader = currentReaderContext.reader().storedFields()::document;
             }
             for (FetchSubPhaseProcessor processor : processors) {
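
The branch above hinges on hasSequentialDocs, which is not shown in this diff; in the upstream fetch phase it is an adjacency check over the already-sorted doc ids, so the run is contiguous exactly when last - first == length - 1. A sketch of that check, under that assumption:

    // Assumed shape of the adjacency check behind hasSequentialDocs:
    // ids are sorted ascending, so they are contiguous iff the span
    // equals the count minus one.
    static boolean hasSequentialDocs(int[] sortedDocIds) {
        return sortedDocIds.length > 0
            && sortedDocIds[sortedDocIds.length - 1] - sortedDocIds[0] == sortedDocIds.length - 1;
    }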
@@ -507,6 +509,7 @@ private static Map<String, Object> buildUsingDocValues(int docId, LeafReader rea
                         }
                     }
                 } else if (fieldMapper instanceof DateFieldMapper) {
+                    logger.info("Search path -> Doc Id : " + docId + ", field : " + fieldName);
                     DateFormatter dateFormatter = ((DateFieldMapper) fieldMapper).fieldType().dateTimeFormatter();
                     if (sndv.advanceExact(docId)) {
                         int size = sndv.docValueCount();
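
For reference, the contract that the new try/catch blocks instrument: after advanceExact(docId) returns true, nextValue() may be called exactly docValueCount() times for that document and returns values in sorted order, which is why the logged docId/size/i triple pinpoints a read that runs past the advertised count. A minimal well-formed read loop, with an illustrative field name:

    import org.apache.lucene.index.LeafReader;
    import org.apache.lucene.index.SortedNumericDocValues;

    import java.io.IOException;

    class SndvContract { // illustrative only
        static void dumpField(LeafReader reader, int docId) throws IOException {
            SortedNumericDocValues sndv = reader.getSortedNumericDocValues("timestamp"); // hypothetical field
            if (sndv != null && sndv.advanceExact(docId)) {
                int size = sndv.docValueCount();   // values stored for this doc
                for (int i = 0; i < size; i++) {
                    long value = sndv.nextValue(); // valid exactly size times
                    System.out.println(docId + " -> " + value);
                }
            }
        }
    }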
