Skip to content

Commit

Permalink
Addressed Sagar's comments
Browse files Browse the repository at this point in the history
Signed-off-by: Peter Alfonsi <[email protected]>
  • Loading branch information
Peter Alfonsi committed Nov 5, 2024
1 parent dc515d9 commit f85b719
Show file tree
Hide file tree
Showing 7 changed files with 49 additions and 48 deletions.
2 changes: 1 addition & 1 deletion CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
- Fix warnings from SLF4J on startup when repository-s3 is installed ([#16194](https://github.com/opensearch-project/OpenSearch/pull/16194))
- Fix protobuf-java leak through client library dependencies ([#16254](https://github.com/opensearch-project/OpenSearch/pull/16254))
- Fix multi-search with template doesn't return status code ([#16265](https://github.com/opensearch-project/OpenSearch/pull/16265))
- Fix bug in new cache stats API when closing shards while using TieredSpilloverCache ([#16560](https://github.com/opensearch-project/OpenSearch/pull/16560))
- [Tiered Caching] Fix bug in cache stats API ([#16560](https://github.com/opensearch-project/OpenSearch/pull/16560))

### Security

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -38,9 +38,9 @@
import java.util.Map;
import java.util.concurrent.TimeUnit;

import static org.opensearch.common.cache.stats.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_NAME;
import static org.opensearch.common.cache.stats.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_DISK;
import static org.opensearch.common.cache.stats.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_ON_HEAP;
import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_NAME;
import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_DISK;
import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_ON_HEAP;
import static org.opensearch.indices.IndicesService.INDICES_CACHE_CLEAN_INTERVAL_SETTING;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@
import org.opensearch.common.cache.RemovalReason;
import org.opensearch.common.cache.policy.CachedQueryResult;
import org.opensearch.common.cache.stats.ImmutableCacheStatsHolder;
import org.opensearch.common.cache.stats.TieredSpilloverCacheStatsHolder;
import org.opensearch.common.cache.store.config.CacheConfig;
import org.opensearch.common.collect.Tuple;
import org.opensearch.common.settings.Setting;
Expand Down Expand Up @@ -54,10 +53,10 @@
import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TIERED_SPILLOVER_DISK_STORE_SIZE;
import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_SIZE;
import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TIERED_SPILLOVER_SEGMENTS;
import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_DISK;
import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_ON_HEAP;
import static org.opensearch.common.cache.settings.CacheSettings.INVALID_SEGMENT_COUNT_EXCEPTION_MESSAGE;
import static org.opensearch.common.cache.settings.CacheSettings.VALID_SEGMENT_COUNT_VALUES;
import static org.opensearch.common.cache.stats.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_DISK;
import static org.opensearch.common.cache.stats.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_ON_HEAP;

/**
 * This cache spills over the evicted items from the heap tier to the disk tier. All the new items are first cached on heap
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,9 +6,9 @@
* compatible open source license.
*/

package org.opensearch.common.cache.stats;
package org.opensearch.cache.common.tier;

import org.opensearch.cache.common.tier.TieredSpilloverCache;
import org.opensearch.common.cache.stats.DefaultCacheStatsHolder;

import java.util.ArrayList;
import java.util.List;
Expand Down Expand Up @@ -177,20 +177,9 @@ public void removeDimensions(List<String> dimensionValues) {
// As we are removing nodes from the tree, obtain the lock
lock.lock();
try {
removeDimensionsHelper(dimensionValues, getStatsRoot(), 0);
removeDimensionsHelper(dimensionValues, statsRoot, 0);
} finally {
lock.unlock();
}
}

@Override
protected ImmutableCacheStats removeDimensionsBaseCase(Node node) {
// The base case will be the node whose children represent individual tiers.
// Manually delete this node's children
for (String tierValue : TIER_VALUES) {
node.children.remove(tierValue);
}
// Pass up a snapshot of the original stats to avoid issues when the original is decremented by other fn invocations
return node.getImmutableStats();
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,12 @@
* compatible open source license.
*/

package org.opensearch.common.cache.stats;
package org.opensearch.cache.common.tier;

import org.opensearch.common.Randomness;
import org.opensearch.common.metrics.CounterMetric;
import org.opensearch.common.cache.stats.CacheStats;
import org.opensearch.common.cache.stats.DefaultCacheStatsHolder;
import org.opensearch.common.cache.stats.ImmutableCacheStats;
import org.opensearch.test.OpenSearchTestCase;

import java.util.ArrayList;
Expand All @@ -21,9 +23,9 @@
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CountDownLatch;

import static org.opensearch.common.cache.stats.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_DISK;
import static org.opensearch.common.cache.stats.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_ON_HEAP;
import static org.opensearch.common.cache.stats.TieredSpilloverCacheStatsHolder.TIER_VALUES;
import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_DISK;
import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_ON_HEAP;
import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_VALUES;

public class TieredSpilloverCacheStatsHolderTests extends OpenSearchTestCase {
// These are modified from DefaultCacheStatsHolderTests.java to account for the tiers. Because we can't add a dependency on server.test,
Expand Down Expand Up @@ -78,12 +80,17 @@ public void testReset() throws Exception {
cacheStatsHolder.reset();
for (List<String> dimensionValues : expected.keySet()) {
CacheStats originalCounter = expected.get(dimensionValues);
originalCounter.sizeInBytes = new CounterMetric();
originalCounter.items = new CounterMetric();
ImmutableCacheStats expectedTotal = new ImmutableCacheStats(
originalCounter.getHits(),
originalCounter.getMisses(),
originalCounter.getEvictions(),
0,
0
);

DefaultCacheStatsHolder.Node node = getNode(dimensionValues, cacheStatsHolder.getStatsRoot());
ImmutableCacheStats actual = node.getImmutableStats();
assertEquals(originalCounter.immutableSnapshot(), actual);
assertEquals(expectedTotal, actual);
}
}

Expand Down Expand Up @@ -130,7 +137,7 @@ public void testDropStatsForDimensions() throws Exception {
// When we invalidate the last node, all nodes should be deleted except the root node
cacheStatsHolder.removeDimensions(List.of("A2", "B3"));
assertEquals(new ImmutableCacheStats(0, 0, 0, 0, 0), cacheStatsHolder.getStatsRoot().getImmutableStats());
assertEquals(0, cacheStatsHolder.getStatsRoot().children.size());
// assertEquals(0, cacheStatsHolder.getStatsRoot().getChildren().size());
}
}

Expand Down Expand Up @@ -280,11 +287,19 @@ static Map<List<String>, CacheStats> populateStats(
threadRand.nextInt(5000),
threadRand.nextInt(10)
);
expected.get(dimensions).hits.inc(statsToInc.getHits());
expected.get(dimensions).misses.inc(statsToInc.getMisses());
expected.get(dimensions).evictions.inc(statsToInc.getEvictions());
expected.get(dimensions).sizeInBytes.inc(statsToInc.getSizeInBytes());
expected.get(dimensions).items.inc(statsToInc.getItems());
for (int iter = 0; iter < statsToInc.getHits(); iter++) {
expected.get(dimensions).incrementHits();
}
for (int iter = 0; iter < statsToInc.getMisses(); iter++) {
expected.get(dimensions).incrementMisses();
}
for (int iter = 0; iter < statsToInc.getEvictions(); iter++) {
expected.get(dimensions).incrementEvictions();
}
expected.get(dimensions).incrementSizeInBytes(statsToInc.getSizeInBytes());
for (int iter = 0; iter < statsToInc.getItems(); iter++) {
expected.get(dimensions).incrementItems();
}
populateStatsHolderFromStatsValueMap(cacheStatsHolder, Map.of(dimensions, statsToInc), diskTierEnabled);
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -61,11 +61,11 @@
import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TIERED_SPILLOVER_ONHEAP_STORE_SIZE;
import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TIERED_SPILLOVER_SEGMENTS;
import static org.opensearch.cache.common.tier.TieredSpilloverCacheSettings.TOOK_TIME_POLICY_CONCRETE_SETTINGS_MAP;
import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_NAME;
import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_DISK;
import static org.opensearch.cache.common.tier.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_ON_HEAP;
import static org.opensearch.common.cache.settings.CacheSettings.INVALID_SEGMENT_COUNT_EXCEPTION_MESSAGE;
import static org.opensearch.common.cache.settings.CacheSettings.VALID_SEGMENT_COUNT_VALUES;
import static org.opensearch.common.cache.stats.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_NAME;
import static org.opensearch.common.cache.stats.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_DISK;
import static org.opensearch.common.cache.stats.TieredSpilloverCacheStatsHolder.TIER_DIMENSION_VALUE_ON_HEAP;
import static org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings.MAXIMUM_SIZE_IN_BYTES_KEY;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doThrow;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ public class DefaultCacheStatsHolder implements CacheStatsHolder {
// Non-leaf nodes have stats matching the sum of their children.
// We use a tree structure, rather than a map with concatenated keys, to save on memory usage. If there are many leaf
// nodes that share a parent, that parent's dimension value will only be stored once, not many times.
private final Node statsRoot;
protected final Node statsRoot;
// To avoid sync problems, obtain a lock before creating or removing nodes in the stats tree.
// No lock is needed to edit stats on existing nodes.
protected final Lock lock = new ReentrantLock();
Expand Down Expand Up @@ -190,8 +190,10 @@ public void removeDimensions(List<String> dimensionValues) {
// Returns a CacheStatsCounterSnapshot object for the stats to decrement if the removal happened, null otherwise.
protected ImmutableCacheStats removeDimensionsHelper(List<String> dimensionValues, Node node, int depth) {
if (depth == dimensionValues.size()) {
// Remove children, if present.
node.children.clear();
// Pass up a snapshot of the original stats to avoid issues when the original is decremented by other fn invocations
return removeDimensionsBaseCase(node);
return node.getImmutableStats();
}
Node child = node.getChild(dimensionValues.get(depth));
if (child == null) {
Expand All @@ -208,19 +210,15 @@ protected ImmutableCacheStats removeDimensionsHelper(List<String> dimensionValue
return statsToDecrement;
}

protected ImmutableCacheStats removeDimensionsBaseCase(Node node) {
return node.getImmutableStats();
}

// pkg-private for testing
Node getStatsRoot() {
public Node getStatsRoot() {
return statsRoot;
}

/**
* Nodes that make up the tree in the stats holder.
*/
protected static class Node {
public static class Node {
private final String dimensionValue;
// Map from dimensionValue to the DimensionNode for that dimension value.
final Map<String, Node> children;
Expand All @@ -245,7 +243,7 @@ public String getDimensionValue() {
return dimensionValue;
}

protected Map<String, Node> getChildren() {
public Map<String, Node> getChildren() {
// We can safely iterate over ConcurrentHashMap without worrying about thread issues.
return children;
}
Expand Down Expand Up @@ -284,7 +282,7 @@ long getEntries() {
return this.stats.getItems();
}

ImmutableCacheStats getImmutableStats() {
public ImmutableCacheStats getImmutableStats() {
return this.stats.immutableSnapshot();
}

Expand Down

0 comments on commit f85b719

Please sign in to comment.