Introduce new setting to configure when to build graph during segment creation #2007
File: KNNSettings.java
@@ -66,6 +66,7 @@ public class KNNSettings {
     * Settings name
     */
    public static final String KNN_SPACE_TYPE = "index.knn.space_type";
+   public static final String INDEX_KNN_BUILD_VECTOR_DATA_STRUCTURE_THRESHOLD = "index.knn.build_vector_data_structure_threshold";
Review comment: Not a blocker, but I think we need to rename this setting. Need to brainstorm some ideas, but this is too verbose. Some thoughts.
Reply: @VijayanB let's take this as an AI (action item) to conclude on the naming.
Reply: Sure.
    public static final String KNN_ALGO_PARAM_M = "index.knn.algo_param.m";
    public static final String KNN_ALGO_PARAM_EF_CONSTRUCTION = "index.knn.algo_param.ef_construction";
    public static final String KNN_ALGO_PARAM_EF_SEARCH = "index.knn.algo_param.ef_search";
@@ -92,6 +93,9 @@ public class KNNSettings {
     */
    public static final boolean KNN_DEFAULT_FAISS_AVX2_DISABLED_VALUE = false;
    public static final String INDEX_KNN_DEFAULT_SPACE_TYPE = "l2";
+   public static final Integer INDEX_KNN_DEFAULT_BUILD_VECTOR_DATA_STRUCTURE_THRESHOLD = 0;
+   public static final Integer INDEX_KNN_BUILD_VECTOR_DATA_STRUCTURE_THRESHOLD_MIN = -1;
+   public static final Integer INDEX_KNN_BUILD_VECTOR_DATA_STRUCTURE_THRESHOLD_MAX = Integer.MAX_VALUE - 2;
    public static final String INDEX_KNN_DEFAULT_SPACE_TYPE_FOR_BINARY = "hamming";
    public static final Integer INDEX_KNN_DEFAULT_ALGO_PARAM_M = 16;
    public static final Integer INDEX_KNN_DEFAULT_ALGO_PARAM_EF_SEARCH = 100;
@@ -131,6 +135,21 @@ public class KNNSettings {
        Setting.Property.Deprecated
    );

+   /**
+    * build_vector_data_structure_threshold - This parameter determines when to build vector data structure for knn fields during indexing
+    * and merging. Setting -1 (min) will skip building graph, whereas on any other values, the graph will be built if
+    * number of live docs in segment is greater than this threshold. Since max number of documents in a segment can
+    * be Integer.MAX_VALUE - 1, this setting will allow threshold to be up to 1 less than max number of documents in a segment
+    */
+   public static final Setting<Integer> INDEX_KNN_BUILD_VECTOR_DATA_STRUCTURE_THRESHOLD_SETTING = Setting.intSetting(
+       INDEX_KNN_BUILD_VECTOR_DATA_STRUCTURE_THRESHOLD,
+       INDEX_KNN_DEFAULT_BUILD_VECTOR_DATA_STRUCTURE_THRESHOLD,
+       INDEX_KNN_BUILD_VECTOR_DATA_STRUCTURE_THRESHOLD_MIN,
+       INDEX_KNN_BUILD_VECTOR_DATA_STRUCTURE_THRESHOLD_MAX,
+       IndexScope,
+       Dynamic
+   );
+
    /**
     * M - the number of bi-directional links created for every new element during construction.
     * Reasonable range for M is 2-100. Higher M work better on datasets with high intrinsic
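Because the setting is registered as index-scoped and dynamic, it can be supplied when the index is created or updated later. Below is a minimal sketch of supplying it at creation time using the standard OpenSearch Settings builder; the surrounding create-index request and the chosen threshold value are illustrative, not part of this PR.

import org.opensearch.common.settings.Settings;

public class KnnIndexSettingsExample {
    // -1 skips building the graph entirely, 0 (the default) keeps the existing behaviour of
    // always building, and a positive value defers building until a segment holds that many live docs.
    static Settings knnIndexSettings() {
        return Settings.builder()
            .put("index.knn", true)
            .put("index.knn.build_vector_data_structure_threshold", 10_000)
            .build();
    }
}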
@@ -447,6 +466,7 @@ private Setting<?> getSetting(String key) {
    public List<Setting<?>> getSettings() {
        List<Setting<?>> settings = Arrays.asList(
            INDEX_KNN_SPACE_TYPE,
+           INDEX_KNN_BUILD_VECTOR_DATA_STRUCTURE_THRESHOLD_SETTING,
            INDEX_KNN_ALGO_PARAM_M_SETTING,
            INDEX_KNN_ALGO_PARAM_EF_CONSTRUCTION_SETTING,
            INDEX_KNN_ALGO_PARAM_EF_SEARCH_SETTING,
File: NativeEngines990KnnVectorsWriter.java
@@ -52,10 +52,16 @@ public class NativeEngines990KnnVectorsWriter extends KnnVectorsWriter {
    private KNN990QuantizationStateWriter quantizationStateWriter;
    private final List<NativeEngineFieldVectorsWriter<?>> fields = new ArrayList<>();
    private boolean finished;
+   private final Integer buildVectorDataStructureThreshold;

-   public NativeEngines990KnnVectorsWriter(SegmentWriteState segmentWriteState, FlatVectorsWriter flatVectorsWriter) {
+   public NativeEngines990KnnVectorsWriter(
+       SegmentWriteState segmentWriteState,
+       FlatVectorsWriter flatVectorsWriter,
+       Integer buildVectorDataStructureThreshold
+   ) {
        this.segmentWriteState = segmentWriteState;
        this.flatVectorsWriter = flatVectorsWriter;
+       this.buildVectorDataStructureThreshold = buildVectorDataStructureThreshold;
    }

    /**
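For reference, a purely illustrative construction of the writer with the new third argument; the wrapping class, method name, and the SegmentWriteState and FlatVectorsWriter instances are assumptions (real callers would obtain them from the codec and read the threshold from the index settings).

import org.apache.lucene.codecs.hnsw.FlatVectorsWriter;
import org.apache.lucene.index.SegmentWriteState;

public class WriterConstructionExample {
    // Threshold 0 (the index default) preserves the previous behaviour of always building
    // the vector data structure during flush and merge.
    static NativeEngines990KnnVectorsWriter newWriter(SegmentWriteState state, FlatVectorsWriter flatWriter) {
        return new NativeEngines990KnnVectorsWriter(state, flatWriter, 0);
    }
}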
@@ -83,24 +89,34 @@ public void flush(int maxDoc, final Sorter.DocMap sortMap) throws IOException {
            final FieldInfo fieldInfo = field.getFieldInfo();
            final VectorDataType vectorDataType = extractVectorDataType(fieldInfo);
            int totalLiveDocs = getLiveDocs(getVectorValues(vectorDataType, field.getDocsWithField(), field.getVectors()));
-           if (totalLiveDocs > 0) {
-               KNNVectorValues<?> knnVectorValues = getVectorValues(vectorDataType, field.getDocsWithField(), field.getVectors());
+           if (totalLiveDocs == 0) {
+               log.debug("[Flush] No live docs for field {}", fieldInfo.getName());
+               continue;
+           }
+           KNNVectorValues<?> knnVectorValues = getVectorValues(vectorDataType, field.getDocsWithField(), field.getVectors());

-               final QuantizationState quantizationState = train(field.getFieldInfo(), knnVectorValues, totalLiveDocs);
-               final NativeIndexWriter writer = NativeIndexWriter.getWriter(fieldInfo, segmentWriteState, quantizationState);
+           final QuantizationState quantizationState = train(field.getFieldInfo(), knnVectorValues, totalLiveDocs);
+           // Will consider building vector data structure based on threshold only for non quantization indices
+           if (quantizationState == null && shouldSkipBuildingVectorDataStructure(totalLiveDocs)) {
+               log.info(
+                   "Skip building vector data structure for field: {}, as liveDoc: {} is less than the threshold {} during flush",
+                   fieldInfo.name,
+                   totalLiveDocs,
+                   buildVectorDataStructureThreshold
+               );
+               continue;
+           }
+           final NativeIndexWriter writer = NativeIndexWriter.getWriter(fieldInfo, segmentWriteState, quantizationState);

-               knnVectorValues = getVectorValues(vectorDataType, field.getDocsWithField(), field.getVectors());
+           knnVectorValues = getVectorValues(vectorDataType, field.getDocsWithField(), field.getVectors());

-               StopWatch stopWatch = new StopWatch().start();
+           StopWatch stopWatch = new StopWatch().start();

-               writer.flushIndex(knnVectorValues, totalLiveDocs);
+           writer.flushIndex(knnVectorValues, totalLiveDocs);

-               long time_in_millis = stopWatch.stop().totalTime().millis();
-               KNNGraphValue.REFRESH_TOTAL_TIME_IN_MILLIS.incrementBy(time_in_millis);
-               log.debug("Flush took {} ms for vector field [{}]", time_in_millis, fieldInfo.getName());
-           } else {
-               log.debug("[Flush] No live docs for field {}", fieldInfo.getName());
-           }
+           long time_in_millis = stopWatch.stop().totalTime().millis();
+           KNNGraphValue.REFRESH_TOTAL_TIME_IN_MILLIS.incrementBy(time_in_millis);
+           log.debug("Flush took {} ms for vector field [{}]", time_in_millis, fieldInfo.getName());
        }
    }
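The skip decision used in the flush path above (and in the merge path below) reduces to a simple comparison on the threshold. Here is a self-contained illustration; the helper name is made up and merely mirrors the writer's private shouldSkipBuildingVectorDataStructure method shown later in this diff.

public class ThresholdSemanticsDemo {
    // Mirrors shouldSkipBuildingVectorDataStructure: skip when threshold is negative,
    // otherwise skip only while the segment has fewer live docs than the threshold.
    static boolean shouldSkip(int threshold, long liveDocs) {
        if (threshold < 0) {
            return true;              // -1: never build the graph during flush or merge
        }
        return liveDocs < threshold;  // build once the segment reaches the threshold
    }

    public static void main(String[] args) {
        System.out.println(shouldSkip(-1, 1_000_000));  // true:  graph building disabled
        System.out.println(shouldSkip(0, 1));           // false: default 0 always builds
        System.out.println(shouldSkip(10_000, 500));    // true:  small segment, defer to a later merge
        System.out.println(shouldSkip(10_000, 50_000)); // false: segment is big enough, build now
    }
}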
@@ -118,6 +134,16 @@ public void mergeOneField(final FieldInfo fieldInfo, final MergeState mergeState

        KNNVectorValues<?> knnVectorValues = getKNNVectorValuesForMerge(vectorDataType, fieldInfo, mergeState);
        final QuantizationState quantizationState = train(fieldInfo, knnVectorValues, totalLiveDocs);
+       // Will consider building vector data structure based on threshold only for non quantization indices
+       if (quantizationState == null && shouldSkipBuildingVectorDataStructure(totalLiveDocs)) {
+           log.info(
+               "Skip building vector data structure for field: {}, as liveDoc: {} is less than the threshold {} during merge",
+               fieldInfo.name,
+               totalLiveDocs,
+               buildVectorDataStructureThreshold
+           );
+           return;
+       }
        final NativeIndexWriter writer = NativeIndexWriter.getWriter(fieldInfo, segmentWriteState, quantizationState);

        knnVectorValues = getKNNVectorValuesForMerge(vectorDataType, fieldInfo, mergeState);
@@ -240,4 +266,11 @@ private void initQuantizationStateWriterIfNecessary() throws IOException {
            quantizationStateWriter.writeHeader(segmentWriteState);
        }
    }
+
+   private boolean shouldSkipBuildingVectorDataStructure(final long docCount) {
Review comment (shatejas): This code path is being used for both merge and flush. Please merge from main; there is an operation name that needs to be considered (it should already be in the main branch), where this should always return false on merge.
Reply (VijayanB): @shatejas Are you suggesting that it should always return false on merge? If so, not necessarily: we don't want to build the graph during background merges either.
Reply (shatejas): I missed the background merge case. A few questions around background merges.
Reply (VijayanB): OpenSearch uses TieredMergePolicy to decide when to perform a merge. IIRC, segmentsPerTier controls how many segments are allowed per tier. When the segment count in a tier goes beyond that limit, merges run in the background. Choosing this value is generally a trade-off of indexing speed vs. search speed: having fewer segments improves search, since Lucene needs to look into fewer segments, but it increases indexing time because of more background merges. @navneet1v Please add more context if I missed anything. You can find the default values here: https://github.com/opensearch-project/OpenSearch/blob/main/server/src/main/java/org/opensearch/index/TieredMergePolicyProvider.java#L138
Reply (shatejas): Thanks for the link @VijayanB.
Reply:
- No, it is the correct experience. It just defeats the overall purpose of why the change is being added in the first place.
- This is what threshold-based graph creation will do. If users want to do graph creation only during merges, they can keep the threshold high.
- If the user is looking to do so, it is better to use the threshold.
- No, it is not optional, because even when you have small segments it is not guaranteed that merges will happen.
- The trigger for a merge depends heavily on indexing speed. You might not have merges for a whole day, but the moment you add one more doc to the index, merges may get triggered, so it really depends on the state of the shard, i.e. the Lucene index. Since there is no formal documentation or wiki around merges, I would recommend reading https://blog.mikemccandless.com/2011/02/visualizing-lucenes-segment-merges.html, an old blog post from one of the Lucene maintainers that might help resolve some of the doubts you have about merges.
Reply (shatejas): Understood, so it seems we need the user to change the setting for force merge.
Reply: Yes.
Reply: @shatejas Added a unit test by rebasing from main.
+       if (buildVectorDataStructureThreshold < 0) {
+           return true;
+       }
+       return docCount < buildVectorDataStructureThreshold;
+   }
}
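Following the conclusion of the review thread above (defer graph creation during indexing, then change the setting before a force merge so the merged segments build their graphs), here is a hedged sketch of that workflow using the in-cluster Client admin API. The index name, the wrapping class, and the availability of a Client instance are assumptions; the equivalent REST calls would work the same way.

import org.opensearch.client.Client;
import org.opensearch.common.settings.Settings;

public class ForceMergeGraphBuild {
    // Re-enable graph building (threshold 0), then force merge so the resulting
    // segment(s) build their vector data structures.
    static void buildGraphsViaForceMerge(Client client, String index) {
        client.admin()
            .indices()
            .prepareUpdateSettings(index)
            .setSettings(Settings.builder().put("index.knn.build_vector_data_structure_threshold", 0))
            .get();
        client.admin().indices().prepareForceMerge(index).setMaxNumSegments(1).get();
    }
}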
Review comment: Why is this changelog entry added under the unreleased 3.0 section?
Reply: Good catch. I will add it to 2.x when I create the PR for 2.x; currently this PR is raised against the feature branch.