Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[DO NOT REVIEW] Memory and CPU intensive search #15005

Closed
wants to merge 7 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -257,6 +257,10 @@ public void apply(Settings value, Settings current, Settings previous) {
public static Set<Setting<?>> BUILT_IN_CLUSTER_SETTINGS = Collections.unmodifiableSet(
new HashSet<>(
Arrays.asList(
// changed: for performComputeIntensiveTask
SearchService.COMPUTE_INTENSIVE_DURATION_SECONDS,
SearchService.MEMORY_OVERHEAD_PER_ITERATION,

AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING,
AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING,
AwarenessReplicaBalance.CLUSTER_ROUTING_ALLOCATION_AWARENESS_BALANCE_SETTING,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -146,6 +146,8 @@ public final class IndicesRequestCache implements RemovalListener<ICacheKey<Indi
Property.NodeScope
);



private final static long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(Key.class);

private final ConcurrentMap<CleanupKey, Boolean> registeredClosedListeners = ConcurrentCollections.newConcurrentMap();
Expand All @@ -162,6 +164,7 @@ public final class IndicesRequestCache implements RemovalListener<ICacheKey<Indi
public static final String SHARD_ID_DIMENSION_NAME = "shards";
public static final String INDEX_DIMENSION_NAME = "indices";


IndicesRequestCache(
Settings settings,
Function<ShardId, Optional<CacheEntity>> cacheEntityFunction,
Expand All @@ -182,6 +185,7 @@ public final class IndicesRequestCache implements RemovalListener<ICacheKey<Indi
this.clusterService = clusterService;
this.clusterService.getClusterSettings()
.addSettingsUpdateConsumer(INDICES_REQUEST_CACHE_STALENESS_THRESHOLD_SETTING, this::setStalenessThreshold);

this.cache = cacheService.createCache(
new CacheConfig.Builder<Key, BytesReference>().setSettings(settings)
.setWeigher(weigher)
Expand Down
55 changes: 54 additions & 1 deletion server/src/main/java/org/opensearch/search/SearchService.java
Original file line number Diff line number Diff line change
Expand Up @@ -91,6 +91,7 @@
import org.opensearch.index.shard.IndexShard;
import org.opensearch.index.shard.SearchOperationListener;
import org.opensearch.indices.IndicesService;
import org.opensearch.indices.IndicesRequestCache;
import org.opensearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason;
import org.opensearch.node.ResponseCollectorService;
import org.opensearch.script.FieldScript;
Expand Down Expand Up @@ -171,6 +172,17 @@
public class SearchService extends AbstractLifecycleComponent implements IndexEventListener {
private static final Logger logger = LogManager.getLogger(SearchService.class);

// Experimental dynamic settings driving performComputeIntensiveTask (load-testing hooks).
// NOTE(review): defaults are 0, so the task is a no-op unless explicitly enabled via cluster settings.
public static final Setting<Integer> COMPUTE_INTENSIVE_DURATION_SECONDS =
Setting.intSetting("search.service.experimental.compute_intensive.duration_seconds", 0, Setting.Property.Dynamic, Setting.Property.NodeScope);

// Bytes allocated (and touched) per busy-loop iteration; controls artificial memory pressure.
public static final Setting<Integer> MEMORY_OVERHEAD_PER_ITERATION =
Setting.intSetting("search.service.experimental.memory_overhead.per_iteration", 0, Setting.Property.Dynamic, Setting.Property.NodeScope);

// Volatile backing fields: written by the dynamic-settings update consumers, read on search threads.
private volatile int computeIntensiveDurationSeconds;
private volatile int memoryOverheadPerIteration;

// we can have 5 minutes here, since we make sure to clean with search requests and when shard/index closes
public static final Setting<TimeValue> DEFAULT_KEEPALIVE_SETTING = Setting.positiveTimeSetting(
"search.default_keep_alive",
Expand Down Expand Up @@ -374,6 +386,13 @@ public SearchService(
TaskResourceTrackingService taskResourceTrackingService
) {
Settings settings = clusterService.getSettings();
// changed: new setting
this.computeIntensiveDurationSeconds = SearchService.COMPUTE_INTENSIVE_DURATION_SECONDS.get(settings);
this.memoryOverheadPerIteration = SearchService.MEMORY_OVERHEAD_PER_ITERATION.get(settings);
clusterService.getClusterSettings().addSettingsUpdateConsumer(SearchService.COMPUTE_INTENSIVE_DURATION_SECONDS, this::setComputeIntensiveDurationSeconds);
clusterService.getClusterSettings().addSettingsUpdateConsumer(SearchService.MEMORY_OVERHEAD_PER_ITERATION, this::setMemoryOverheadPerIteration);


this.threadPool = threadPool;
this.clusterService = clusterService;
this.indicesService = indicesService;
Expand Down Expand Up @@ -428,6 +447,35 @@ public SearchService(
clusterService.getClusterSettings().addSettingsUpdateConsumer(CLUSTER_ALLOW_DERIVED_FIELD_SETTING, this::setAllowDerivedField);
}

// changed: new setting to line 468
/** Dynamic-settings consumer for {@code COMPUTE_INTENSIVE_DURATION_SECONDS}. */
private void setComputeIntensiveDurationSeconds(int durationSeconds) {
    computeIntensiveDurationSeconds = durationSeconds;
}

/** Dynamic-settings consumer for {@code MEMORY_OVERHEAD_PER_ITERATION}. */
private void setMemoryOverheadPerIteration(int bytesPerIteration) {
    memoryOverheadPerIteration = bytesPerIteration;
}

public void performComputeIntensiveTask() {
long endTime = System.currentTimeMillis() + computeIntensiveDurationSeconds * 1000;
logger.info("Starting compute-intensive task for {} seconds and {} bytes per iteration",
computeIntensiveDurationSeconds, memoryOverheadPerIteration);

int iterations = 0;
while (System.currentTimeMillis() < endTime) {
byte[] memoryHog = new byte[memoryOverheadPerIteration];
for (int j = 0; j < memoryOverheadPerIteration; j++) {
memoryHog[j] = (byte) (j % 256);
}
iterations++;
if (iterations % 1000 == 0) {
logger.info("Performed {} iterations", iterations);
Copy link
Contributor

@kiranprakash154 kiranprakash154 Jul 29, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Suggested change
logger.info("Performed {} iterations", iterations);
logger.info("[ CPU_AND_MEMORY_INTENSIVE ] Performed {} iterations", iterations);

}
}
logger.info("Completed compute-intensive task");
}


private void validateKeepAlives(TimeValue defaultKeepAlive, TimeValue maxKeepAlive) {
if (defaultKeepAlive.millis() > maxKeepAlive.millis()) {
throw new IllegalArgumentException(
Expand Down Expand Up @@ -630,8 +678,12 @@ public void onResponse(ShardSearchRequest orig) {
return;
}
}
performComputeIntensiveTask();
// fork the execution in the search thread pool
runAsync(getExecutor(shard), () -> executeQueryPhase(orig, task, keepStatesInContext), listener);
runAsync(getExecutor(shard), () ->
// changed: Compute- and memory-intensive logic
// performComputeIntensiveTask();
executeQueryPhase(orig, task, keepStatesInContext), listener);
}

@Override
Expand All @@ -641,6 +693,7 @@ public void onFailure(Exception exc) {
});
}


private IndexShard getShard(ShardSearchRequest request) {
if (request.readerId() != null) {
return findReaderContext(request.readerId(), request).indexShard();
Expand Down
Loading