diff --git a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java
index 7c7c700728074..516b57342ebb9 100644
--- a/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java
+++ b/plugins/cache-ehcache/src/main/java/org/opensearch/cache/store/disk/EhcacheDiskCache.java
@@ -162,7 +162,7 @@ private EhcacheDiskCache(Builder<K, V> builder) {
         this.ehCacheEventListener = new EhCacheEventListener(builder.getRemovalListener(), builder.getWeigher());
         this.cache = buildCache(Duration.ofMillis(expireAfterAccess.getMillis()), builder);
         List<String> dimensionNames = Objects.requireNonNull(builder.dimensionNames, "Dimension names can't be null");
-        this.statsHolder = new StatsHolder(dimensionNames);
+        this.statsHolder = new StatsHolder(dimensionNames, EhcacheDiskCacheFactory.EHCACHE_DISK_CACHE_NAME);
     }
 
     @SuppressWarnings({ "rawtypes" })
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java
new file mode 100644
index 0000000000000..e96dc68c2c4e4
--- /dev/null
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/CacheStatsAPIIndicesRequestCacheIT.java
@@ -0,0 +1,237 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.indices;
+
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest;
+import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse;
+import org.opensearch.action.admin.indices.stats.CommonStatsFlags;
+import org.opensearch.action.search.SearchResponse;
+import org.opensearch.client.Client;
+import org.opensearch.cluster.metadata.IndexMetadata;
+import org.opensearch.common.cache.CacheType;
+import org.opensearch.common.cache.service.NodeCacheStats;
+import org.opensearch.common.cache.stats.CacheStatsCounterSnapshot;
+import org.opensearch.common.cache.stats.MultiDimensionCacheStatsTests;
+import org.opensearch.common.settings.Settings;
+import org.opensearch.common.util.FeatureFlags;
+import org.opensearch.common.xcontent.XContentFactory;
+import org.opensearch.common.xcontent.XContentHelper;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
+import org.opensearch.core.xcontent.ToXContent;
+import org.opensearch.core.xcontent.XContentBuilder;
+import org.opensearch.index.query.QueryBuilders;
+import org.opensearch.test.OpenSearchIntegTestCase;
+import org.opensearch.test.ParameterizedStaticSettingsOpenSearchIntegTestCase;
+import org.opensearch.test.hamcrest.OpenSearchAssertions;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked;
+import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertSearchResponse;
+
+// Use a single data node to simplify logic about cache stats across different shards.
+@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 1)
+public class CacheStatsAPIIndicesRequestCacheIT extends ParameterizedStaticSettingsOpenSearchIntegTestCase {
+    public CacheStatsAPIIndicesRequestCacheIT(Settings settings) {
+        super(settings);
+    }
+
+    @ParametersFactory
+    public static Collection<Object[]> parameters() {
+        return Arrays.asList(
+            new Object[] { Settings.builder().put(FeatureFlags.PLUGGABLE_CACHE, "true").build() },
+            new Object[] { Settings.builder().put(FeatureFlags.PLUGGABLE_CACHE, "false").build() }
+        );
+    }
+
+    public void testCacheStatsAPIWithOnHeapCache() throws Exception {
+        String index1Name = "index1";
+        String index2Name = "index2";
+        Client client = client();
+
+        startIndex(client, index1Name);
+        startIndex(client, index2Name);
+
+        // Search twice for the same doc in index 1
+        for (int i = 0; i < 2; i++) {
+            SearchResponse resp = client.prepareSearch(index1Name)
+                .setRequestCache(true)
+                .setQuery(QueryBuilders.termQuery("k", "hello"))
+                .get();
+            assertSearchResponse(resp);
+            OpenSearchAssertions.assertAllSuccessful(resp);
+            assertEquals(1, resp.getHits().getTotalHits().value);
+        }
+
+        // Search once for a doc in index 2
+        SearchResponse resp = client.prepareSearch(index2Name).setRequestCache(true).setQuery(QueryBuilders.termQuery("k", "hello")).get();
+        assertSearchResponse(resp);
+        OpenSearchAssertions.assertAllSuccessful(resp);
+        assertEquals(1, resp.getHits().getTotalHits().value);
+
+        // First, aggregate by indices only
+        Map<String, Object> xContentMap = getNodeCacheStatsXContentMap(client, "", List.of(IndicesRequestCache.INDEX_DIMENSION_NAME));
+
+        List<String> index1Keys = List.of(
+            CacheType.INDICES_REQUEST_CACHE.getApiRepresentation(),
+            IndicesRequestCache.INDEX_DIMENSION_NAME,
+            index1Name
+        );
+        // Since we searched twice, we expect to see 1 hit, 1 miss and 1 entry for index 1
+        CacheStatsCounterSnapshot expectedStats = new CacheStatsCounterSnapshot(1, 1, 0, 0, 1);
+        checkCacheStatsAPIResponse(xContentMap, index1Keys, expectedStats, false);
+        // Get the request size for one request, so we can reuse it for next index
+        int requestSize = (int) ((Map<String, Object>) MultiDimensionCacheStatsTests.getValueFromNestedXContentMap(xContentMap, index1Keys))
+            .get(CacheStatsCounterSnapshot.Fields.MEMORY_SIZE_IN_BYTES);
+        assertTrue(requestSize > 0);
+
+        List<String> index2Keys = List.of(
+            CacheType.INDICES_REQUEST_CACHE.getApiRepresentation(),
+            IndicesRequestCache.INDEX_DIMENSION_NAME,
+            index2Name
+        );
+        // We searched once in index 2, we expect 1 miss + 1 entry
+        expectedStats = new CacheStatsCounterSnapshot(0, 1, 0, requestSize, 1);
+        checkCacheStatsAPIResponse(xContentMap, index2Keys, expectedStats, true);
+
+        // The total stats for the node should be 1 hit, 2 misses, and 2 entries
+        expectedStats = new CacheStatsCounterSnapshot(1, 2, 0, 2 * requestSize, 2);
+        List<String> totalStatsKeys = List.of(CacheType.INDICES_REQUEST_CACHE.getApiRepresentation());
+        checkCacheStatsAPIResponse(xContentMap, totalStatsKeys, expectedStats, true);
+
+        // Aggregate by shards only
+        xContentMap = getNodeCacheStatsXContentMap(client, "", List.of(IndicesRequestCache.SHARD_ID_DIMENSION_NAME));
+
+        List<String> index1Shard0Keys = List.of(
+            CacheType.INDICES_REQUEST_CACHE.getApiRepresentation(),
+            IndicesRequestCache.SHARD_ID_DIMENSION_NAME,
+            "[" + index1Name + "][0]"
+        );
+
+        expectedStats = new CacheStatsCounterSnapshot(1, 1, 0, requestSize, 1);
+        checkCacheStatsAPIResponse(xContentMap, index1Shard0Keys, expectedStats, true);
+
+        List<String> index2Shard0Keys = List.of(
+            CacheType.INDICES_REQUEST_CACHE.getApiRepresentation(),
+            IndicesRequestCache.SHARD_ID_DIMENSION_NAME,
+            "[" + index2Name + "][0]"
+        );
+        expectedStats = new CacheStatsCounterSnapshot(0, 1, 0, requestSize, 1);
+        checkCacheStatsAPIResponse(xContentMap, index2Shard0Keys, expectedStats, true);
+
+        // Aggregate by indices and shards
+        xContentMap = getNodeCacheStatsXContentMap(
+            client,
+            "",
+            List.of(IndicesRequestCache.INDEX_DIMENSION_NAME, IndicesRequestCache.SHARD_ID_DIMENSION_NAME)
+        );
+
+        index1Keys = List.of(
+            CacheType.INDICES_REQUEST_CACHE.getApiRepresentation(),
+            IndicesRequestCache.INDEX_DIMENSION_NAME,
+            index1Name,
+            IndicesRequestCache.SHARD_ID_DIMENSION_NAME,
+            "[" + index1Name + "][0]"
+        );
+
+        expectedStats = new CacheStatsCounterSnapshot(1, 1, 0, requestSize, 1);
+        checkCacheStatsAPIResponse(xContentMap, index1Keys, expectedStats, true);
+
+        index2Keys = List.of(
+            CacheType.INDICES_REQUEST_CACHE.getApiRepresentation(),
+            IndicesRequestCache.INDEX_DIMENSION_NAME,
+            index2Name,
+            IndicesRequestCache.SHARD_ID_DIMENSION_NAME,
+            "[" + index2Name + "][0]"
+        );
+
+        expectedStats = new CacheStatsCounterSnapshot(0, 1, 0, requestSize, 1);
+        checkCacheStatsAPIResponse(xContentMap, index2Keys, expectedStats, true);
+
+    }
+
+    // TODO: Add testCacheStatsAPIWithTieredCache when TSC stats implementation PR is merged
+
+    private void startIndex(Client client, String indexName) throws InterruptedException {
+        assertAcked(
+            client.admin()
+                .indices()
+                .prepareCreate(indexName)
+                .setMapping("k", "type=keyword")
+                .setSettings(
+                    Settings.builder()
+                        .put(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING.getKey(), true)
+                        .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+                        .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+                )
+                .get()
+        );
+        indexRandom(true, client.prepareIndex(indexName).setSource("k", "hello"));
+        ensureSearchable(indexName);
+    }
+
+    private static Map<String, Object> getNodeCacheStatsXContentMap(Client client, String nodeId, List<String> aggregationLevels)
+        throws IOException {
+
+        CommonStatsFlags statsFlags = new CommonStatsFlags();
+        statsFlags.includeAllCacheTypes();
+
+        NodesStatsResponse nodeStatsResponse = client.admin()
+            .cluster()
+            .prepareNodesStats("data:true")
+            .addMetric(NodesStatsRequest.Metric.CACHE_STATS.metricName())
+            .setIndices(statsFlags)
+            .get();
+        // Can always get the first data node as there's only one in this test suite
+        assertEquals(1, nodeStatsResponse.getNodes().size());
+        NodeCacheStats ncs = nodeStatsResponse.getNodes().get(0).getNodeCacheStats();
+
+        XContentBuilder builder = XContentFactory.jsonBuilder();
+        Map<String, String> paramMap = Map.of("level", String.join(",", aggregationLevels));
+        ToXContent.Params params = new ToXContent.MapParams(paramMap);
+
+        builder.startObject();
+        ncs.toXContent(builder, params);
+        builder.endObject();
+
+        String resultString = builder.toString();
+        return XContentHelper.convertToMap(MediaTypeRegistry.JSON.xContent(), resultString, true);
+    }
+
+    private static void checkCacheStatsAPIResponse(
+        Map<String, Object> xContentMap,
+        List<String> xContentMapKeys,
+        CacheStatsCounterSnapshot expectedStats,
+        boolean checkMemorySize
+    ) {
+        // Assumes the keys point to a level whose keys are the field values ("size_in_bytes", "evictions", etc) and whose values store
+        // those stats
+        Map<String, Object> aggregatedStatsResponse = (Map<String, Object>) MultiDimensionCacheStatsTests.getValueFromNestedXContentMap(
+            xContentMap,
+            xContentMapKeys
+        );
+        assertNotNull(aggregatedStatsResponse);
+        assertEquals(expectedStats.getHits(), (int) aggregatedStatsResponse.get(CacheStatsCounterSnapshot.Fields.HIT_COUNT));
+        assertEquals(expectedStats.getMisses(), (int) aggregatedStatsResponse.get(CacheStatsCounterSnapshot.Fields.MISS_COUNT));
+        assertEquals(expectedStats.getEvictions(), (int) aggregatedStatsResponse.get(CacheStatsCounterSnapshot.Fields.EVICTIONS));
+        if (checkMemorySize) {
+            assertEquals(
+                expectedStats.getSizeInBytes(),
+                (int) aggregatedStatsResponse.get(CacheStatsCounterSnapshot.Fields.MEMORY_SIZE_IN_BYTES)
+            );
+        }
+        assertEquals(expectedStats.getEntries(), (int) aggregatedStatsResponse.get(CacheStatsCounterSnapshot.Fields.ENTRIES));
+    }
+}
diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java
index ec5637cec6485..0e433f87451b1 100644
--- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesRequestCacheIT.java
@@ -694,5 +694,4 @@ private static void assertCacheState(Client client, String index, long expectedH
         );
     }
-
 }
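Note: for orientation, the map that getNodeCacheStatsXContentMap() parses corresponds to JSON of roughly the following shape. This is a hand-written sketch, not captured output: the field names come from CacheStatsCounterSnapshot.Fields, the "request_cache" key from CacheType.INDICES_REQUEST_CACHE's API representation, the "indices" key and the "opensearch_onheap" store name are assumed values of IndicesRequestCache.INDEX_DIMENSION_NAME and OpenSearchOnHeapCacheFactory.NAME, and the byte counts are made up. It assumes level=indices and the two single-shard indices created by the test; note the totals come first, then the per-level tree, then the store name, matching MultiDimensionCacheStats.toXContent below.

    {
      "request_cache": {
        "size_in_bytes": 2048,
        "evictions": 0,
        "hit_count": 1,
        "miss_count": 2,
        "entries": 2,
        "indices": {
          "index1": { "size_in_bytes": 1024, "evictions": 0, "hit_count": 1, "miss_count": 1, "entries": 1 },
          "index2": { "size_in_bytes": 1024, "evictions": 0, "hit_count": 0, "miss_count": 1, "entries": 1 }
        },
        "store_name": "opensearch_onheap"
      }
    }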
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java
index 8562a7eb37709..ac2daf57f248b 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodeStats.java
@@ -39,6 +39,7 @@
 import org.opensearch.cluster.routing.WeightedRoutingStats;
 import org.opensearch.cluster.service.ClusterManagerThrottlingStats;
 import org.opensearch.common.Nullable;
+import org.opensearch.common.cache.service.NodeCacheStats;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.indices.breaker.AllCircuitBreakerStats;
@@ -158,6 +159,9 @@ public class NodeStats extends BaseNodeResponse implements ToXContentFragment {
     @Nullable
     private AdmissionControlStats admissionControlStats;
 
+    @Nullable
+    private NodeCacheStats nodeCacheStats;
+
     public NodeStats(StreamInput in) throws IOException {
         super(in);
         timestamp = in.readVLong();
@@ -234,6 +238,11 @@ public NodeStats(StreamInput in) throws IOException {
         } else {
             admissionControlStats = null;
         }
+        if (in.getVersion().onOrAfter(Version.V_3_0_0)) {
+            nodeCacheStats = in.readOptionalWriteable(NodeCacheStats::new);
+        } else {
+            nodeCacheStats = null;
+        }
     }
 
     public NodeStats(
@@ -264,7 +273,8 @@ public NodeStats(
         @Nullable SearchPipelineStats searchPipelineStats,
         @Nullable SegmentReplicationRejectionStats segmentReplicationRejectionStats,
         @Nullable RepositoriesStats repositoriesStats,
-        @Nullable AdmissionControlStats admissionControlStats
+        @Nullable AdmissionControlStats admissionControlStats,
+        @Nullable NodeCacheStats nodeCacheStats
     ) {
         super(node);
         this.timestamp = timestamp;
@@ -294,6 +304,7 @@ public NodeStats(
         this.segmentReplicationRejectionStats = segmentReplicationRejectionStats;
         this.repositoriesStats = repositoriesStats;
         this.admissionControlStats = admissionControlStats;
+        this.nodeCacheStats = nodeCacheStats;
     }
 
     public long getTimestamp() {
@@ -451,6 +462,11 @@ public AdmissionControlStats getAdmissionControlStats() {
         return admissionControlStats;
     }
 
+    @Nullable
+    public NodeCacheStats getNodeCacheStats() {
+        return nodeCacheStats;
+    }
+
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
@@ -506,6 +522,9 @@ public void writeTo(StreamOutput out) throws IOException {
         if (out.getVersion().onOrAfter(Version.V_2_12_0)) {
             out.writeOptionalWriteable(admissionControlStats);
         }
+        if (out.getVersion().onOrAfter(Version.V_3_0_0)) {
+            out.writeOptionalWriteable(nodeCacheStats);
+        }
     }
 
     @Override
@@ -609,6 +628,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
         if (getAdmissionControlStats() != null) {
             getAdmissionControlStats().toXContent(builder, params);
         }
+        if (getNodeCacheStats() != null) {
+            getNodeCacheStats().toXContent(builder, params);
+        }
         return builder;
     }
 }
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java
index 1af56f10b95ee..379836cf442e3 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/NodesStatsRequest.java
@@ -219,7 +219,8 @@ public enum Metric {
         RESOURCE_USAGE_STATS("resource_usage_stats"),
         SEGMENT_REPLICATION_BACKPRESSURE("segment_replication_backpressure"),
         REPOSITORIES("repositories"),
-        ADMISSION_CONTROL("admission_control");
+        ADMISSION_CONTROL("admission_control"),
+        CACHE_STATS("caches");
 
         private String metricName;
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java
index 1df73d3b4394d..2e93e5e7841cb 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java
@@ -128,7 +128,8 @@ protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) {
             NodesStatsRequest.Metric.RESOURCE_USAGE_STATS.containedIn(metrics),
             NodesStatsRequest.Metric.SEGMENT_REPLICATION_BACKPRESSURE.containedIn(metrics),
             NodesStatsRequest.Metric.REPOSITORIES.containedIn(metrics),
-            NodesStatsRequest.Metric.ADMISSION_CONTROL.containedIn(metrics)
+            NodesStatsRequest.Metric.ADMISSION_CONTROL.containedIn(metrics),
+            NodesStatsRequest.Metric.CACHE_STATS.containedIn(metrics)
         );
     }
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java
index 9c5dcc9e9de3f..e4f483f796f44 100644
--- a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java
@@ -172,6 +172,7 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq
             false,
             false,
             false,
+            false,
             false
         );
         List<ShardStats> shardsStats = new ArrayList<>();
diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java
index a7d9f95b80f7b..cbde1637ea575 100644
--- a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java
+++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java
@@ -34,6 +34,7 @@
 import org.opensearch.Version;
 import org.opensearch.common.annotation.PublicApi;
+import org.opensearch.common.cache.CacheType;
 import org.opensearch.core.common.Strings;
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
@@ -63,6 +64,9 @@ public class CommonStatsFlags implements Writeable, Cloneable {
     private boolean includeAllShardIndexingPressureTrackers = false;
     private boolean includeOnlyTopIndexingPressureMetrics = false;
 
+    // Used for metric CACHE_STATS, to determine which caches to report stats for
+    private EnumSet<CacheType> includeCaches = EnumSet.noneOf(CacheType.class);
+
     /**
      * @param flags flags to set. If no flags are supplied, default flags will be set.
      */
@@ -91,6 +95,9 @@ public CommonStatsFlags(StreamInput in) throws IOException {
         includeUnloadedSegments = in.readBoolean();
         includeAllShardIndexingPressureTrackers = in.readBoolean();
         includeOnlyTopIndexingPressureMetrics = in.readBoolean();
+        if (in.getVersion().onOrAfter(Version.V_3_0_0)) {
+            includeCaches = in.readEnumSet(CacheType.class);
+        }
     }
 
     @Override
@@ -111,6 +118,9 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeBoolean(includeUnloadedSegments);
         out.writeBoolean(includeAllShardIndexingPressureTrackers);
         out.writeBoolean(includeOnlyTopIndexingPressureMetrics);
+        if (out.getVersion().onOrAfter(Version.V_3_0_0)) {
+            out.writeEnumSet(includeCaches);
+        }
     }
 
     /**
@@ -125,6 +135,7 @@ public CommonStatsFlags all() {
         includeUnloadedSegments = false;
         includeAllShardIndexingPressureTrackers = false;
         includeOnlyTopIndexingPressureMetrics = false;
+        includeCaches = EnumSet.noneOf(CacheType.class);
         return this;
     }
 
@@ -140,6 +151,7 @@ public CommonStatsFlags clear() {
         includeUnloadedSegments = false;
         includeAllShardIndexingPressureTrackers = false;
         includeOnlyTopIndexingPressureMetrics = false;
+        includeCaches = EnumSet.noneOf(CacheType.class);
         return this;
     }
 
@@ -151,6 +163,10 @@ public Flag[] getFlags() {
         return flags.toArray(new Flag[0]);
     }
 
+    public EnumSet<CacheType> getIncludeCaches() {
+        return includeCaches;
+    }
+
     /**
      * Sets specific search group stats to retrieve the stats for. Mainly affects search
      * when enabled.
@@ -206,6 +222,16 @@ public CommonStatsFlags includeOnlyTopIndexingPressureMetrics(boolean includeOnl
         return this;
     }
 
+    public CommonStatsFlags includeCacheType(CacheType cacheType) {
+        includeCaches.add(cacheType);
+        return this;
+    }
+
+    public CommonStatsFlags includeAllCacheTypes() {
+        includeCaches = EnumSet.allOf(CacheType.class);
+        return this;
+    }
+
     public boolean includeUnloadedSegments() {
         return this.includeUnloadedSegments;
     }
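Note: a usage sketch of the flags surface added above (illustrative only, using just the methods visible in this hunk):

    // Ask for stats on one specific cache, as the REST handler further down does for
    // GET _nodes/stats/caches/request_cache:
    CommonStatsFlags flags = new CommonStatsFlags();
    flags.clear();
    flags.includeCacheType(CacheType.INDICES_REQUEST_CACHE);

    // Or ask for every registered cache type (the "_all" submetric):
    CommonStatsFlags allCaches = new CommonStatsFlags().clear().includeAllCacheTypes();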
diff --git a/server/src/main/java/org/opensearch/common/cache/CacheType.java b/server/src/main/java/org/opensearch/common/cache/CacheType.java
index c5aeb7cd1fa40..61442db148067 100644
--- a/server/src/main/java/org/opensearch/common/cache/CacheType.java
+++ b/server/src/main/java/org/opensearch/common/cache/CacheType.java
@@ -10,20 +10,46 @@
 
 import org.opensearch.common.annotation.ExperimentalApi;
 
+import java.util.HashSet;
+import java.util.Set;
+
 /**
  * Cache types available within OpenSearch.
  */
 @ExperimentalApi
 public enum CacheType {
-    INDICES_REQUEST_CACHE("indices.requests.cache");
+    INDICES_REQUEST_CACHE("indices.requests.cache", "request_cache");
 
     private final String settingPrefix;
+    private final String apiRepresentation;
 
-    CacheType(String settingPrefix) {
+    CacheType(String settingPrefix, String representation) {
         this.settingPrefix = settingPrefix;
+        this.apiRepresentation = representation;
     }
 
     public String getSettingPrefix() {
         return settingPrefix;
     }
+
+    public String getApiRepresentation() {
+        return apiRepresentation;
+    }
+
+    public static CacheType getByRepresentation(String representation) {
+        for (CacheType cacheType : values()) {
+            if (cacheType.apiRepresentation.equals(representation)) {
+                return cacheType;
+            }
+        }
+        throw new IllegalArgumentException("No CacheType with representation = " + representation);
+    }
+
+    public static Set<String> allRepresentations() {
+        Set<String> reprs = new HashSet<>();
+        for (CacheType cacheType : values()) {
+            reprs.add(cacheType.apiRepresentation);
+        }
+        return reprs;
+    }
 }
diff --git a/server/src/main/java/org/opensearch/common/cache/service/CacheService.java b/server/src/main/java/org/opensearch/common/cache/service/CacheService.java
index b6710e5e4b424..e2b947fbd1040 100644
--- a/server/src/main/java/org/opensearch/common/cache/service/CacheService.java
+++ b/server/src/main/java/org/opensearch/common/cache/service/CacheService.java
@@ -8,10 +8,12 @@
 
 package org.opensearch.common.cache.service;
 
+import org.opensearch.action.admin.indices.stats.CommonStatsFlags;
 import org.opensearch.common.annotation.ExperimentalApi;
 import org.opensearch.common.cache.CacheType;
 import org.opensearch.common.cache.ICache;
 import org.opensearch.common.cache.settings.CacheSettings;
+import org.opensearch.common.cache.stats.CacheStats;
 import org.opensearch.common.cache.store.OpenSearchOnHeapCache;
 import org.opensearch.common.cache.store.config.CacheConfig;
 import org.opensearch.common.settings.Setting;
@@ -62,4 +64,12 @@ public <K, V> ICache<K, V> createCache(CacheConfig<K, V> config, CacheType cache
         cacheTypeMap.put(cacheType, iCache);
         return iCache;
     }
+
+    public NodeCacheStats stats(CommonStatsFlags flags) {
+        Map<CacheType, CacheStats> statsMap = new HashMap<>();
+        for (CacheType type : cacheTypeMap.keySet()) {
+            statsMap.put(type, cacheTypeMap.get(type).stats());
+        }
+        return new NodeCacheStats(statsMap, flags);
+    }
 }
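Note: the representation round trip in a nutshell (a hypothetical snippet, using only the methods added above):

    CacheType t = CacheType.getByRepresentation("request_cache"); // INDICES_REQUEST_CACHE
    assert t.getApiRepresentation().equals("request_cache");
    assert CacheType.allRepresentations().contains("request_cache");
    // getByRepresentation("no_such_cache") throws IllegalArgumentException, which the
    // REST layer further down translates into an "unrecognized cache type" error.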
diff --git a/server/src/main/java/org/opensearch/common/cache/service/NodeCacheStats.java b/server/src/main/java/org/opensearch/common/cache/service/NodeCacheStats.java
new file mode 100644
index 0000000000000..4d0e8c13f6128
--- /dev/null
+++ b/server/src/main/java/org/opensearch/common/cache/service/NodeCacheStats.java
@@ -0,0 +1,81 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.common.cache.service;
+
+import org.opensearch.action.admin.indices.stats.CommonStatsFlags;
+import org.opensearch.common.annotation.ExperimentalApi;
+import org.opensearch.common.cache.CacheType;
+import org.opensearch.common.cache.stats.CacheStats;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.common.io.stream.Writeable;
+import org.opensearch.core.xcontent.ToXContentFragment;
+import org.opensearch.core.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Map;
+import java.util.Objects;
+import java.util.TreeMap;
+
+/**
+ * A class creating XContent responses to cache stats API requests.
+ *
+ * @opensearch.experimental
+ */
+@ExperimentalApi
+public class NodeCacheStats implements ToXContentFragment, Writeable {
+    private final TreeMap<CacheType, CacheStats> statsByCache;
+    private final CommonStatsFlags flags;
+
+    public NodeCacheStats(Map<CacheType, CacheStats> statsByCache, CommonStatsFlags flags) {
+        this.statsByCache = new TreeMap<>(statsByCache); // Use TreeMap to force consistent ordering of caches in API responses
+        this.flags = flags;
+    }
+
+    public NodeCacheStats(StreamInput in) throws IOException {
+        this.flags = new CommonStatsFlags(in);
+        Map<CacheType, CacheStats> readMap = in.readMap(i -> i.readEnum(CacheType.class), CacheStats::readFromStreamWithClassName);
+        this.statsByCache = new TreeMap<>(readMap);
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        flags.writeTo(out);
+        out.writeMap(statsByCache, StreamOutput::writeEnum, (o, cacheStats) -> cacheStats.writeToWithClassName(o));
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        for (CacheType type : statsByCache.keySet()) {
+            if (flags.getIncludeCaches().contains(type)) {
+                builder.startObject(type.getApiRepresentation());
+                statsByCache.get(type).toXContent(builder, params);
+                builder.endObject();
+            }
+        }
+        return builder;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (o == null) {
+            return false;
+        }
+        if (o.getClass() != NodeCacheStats.class) {
+            return false;
+        }
+        NodeCacheStats other = (NodeCacheStats) o;
+        return statsByCache.equals(other.statsByCache) && flags.getIncludeCaches().equals(other.flags.getIncludeCaches());
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(statsByCache, flags);
+    }
+}
diff --git a/server/src/main/java/org/opensearch/common/cache/stats/CacheStats.java b/server/src/main/java/org/opensearch/common/cache/stats/CacheStats.java
index e2937abd8ae93..1e4ae24eb88a1 100644
--- a/server/src/main/java/org/opensearch/common/cache/stats/CacheStats.java
+++ b/server/src/main/java/org/opensearch/common/cache/stats/CacheStats.java
@@ -9,6 +9,12 @@
 package org.opensearch.common.cache.stats;
 
 import org.opensearch.common.annotation.ExperimentalApi;
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.common.io.stream.Writeable;
+import org.opensearch.core.xcontent.ToXContentFragment;
+
+import java.io.IOException;
 
 /**
  * Interface for access to any cache stats. Allows accessing stats by dimension values.
@@ -17,7 +23,7 @@
  * @opensearch.experimental
  */
 @ExperimentalApi
-public interface CacheStats { // TODO: also extends Writeable, ToXContentFragment (in API PR)
+public interface CacheStats extends Writeable, ToXContentFragment {
 
     // Method to get all 5 values at once
     CacheStatsCounterSnapshot getTotalStats();
@@ -32,4 +38,18 @@ public interface CacheStats { // TODO: also extends Writeable, ToXContentFragmen
     long getTotalSizeInBytes();
 
     long getTotalEntries();
+
+    // Used for the readFromStream method to allow deserialization of generic CacheStats objects.
+    String getClassName();
+
+    void writeToWithClassName(StreamOutput out) throws IOException;
+
+    static CacheStats readFromStreamWithClassName(StreamInput in) throws IOException {
+        String className = in.readString();
+
+        if (className.equals(MultiDimensionCacheStats.CLASS_NAME)) {
+            return new MultiDimensionCacheStats(in);
+        }
+        return null;
+    }
 }
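Note: since CacheStats is an interface, the class-name prefix is what lets NodeCacheStats deserialize values without a NamedWriteableRegistry. A minimal round-trip sketch (hypothetical, mirroring what NodeCacheStats.writeTo and its readMap deserializer do per entry):

    BytesStreamOutput out = new BytesStreamOutput();
    cacheStats.writeToWithClassName(out);   // writes "multidimension", then the payload
    StreamInput in = out.bytes().streamInput();
    CacheStats roundTripped = CacheStats.readFromStreamWithClassName(in);
    // Caveat visible above: an unrecognized class name yields null rather than throwing,
    // so new CacheStats implementations must be added to the dispatch in readFromStreamWithClassName.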
diff --git a/server/src/main/java/org/opensearch/common/cache/stats/CacheStatsCounterSnapshot.java b/server/src/main/java/org/opensearch/common/cache/stats/CacheStatsCounterSnapshot.java
index 3057edd8b2afc..399197239df9d 100644
--- a/server/src/main/java/org/opensearch/common/cache/stats/CacheStatsCounterSnapshot.java
+++ b/server/src/main/java/org/opensearch/common/cache/stats/CacheStatsCounterSnapshot.java
@@ -12,6 +12,9 @@
 import org.opensearch.core.common.io.stream.StreamInput;
 import org.opensearch.core.common.io.stream.StreamOutput;
 import org.opensearch.core.common.io.stream.Writeable;
+import org.opensearch.core.common.unit.ByteSizeValue;
+import org.opensearch.core.xcontent.ToXContent;
+import org.opensearch.core.xcontent.XContentBuilder;
 
 import java.io.IOException;
 import java.util.Objects;
@@ -22,7 +25,7 @@
  * @opensearch.experimental
  */
 @ExperimentalApi
-public class CacheStatsCounterSnapshot implements Writeable { // TODO: Make this extend ToXContent (in API PR)
+public class CacheStatsCounterSnapshot implements Writeable, ToXContent {
     private final long hits;
     private final long misses;
     private final long evictions;
@@ -100,4 +103,26 @@ public boolean equals(Object o) {
     public int hashCode() {
         return Objects.hash(hits, misses, evictions, sizeInBytes, entries);
     }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        // We don't write a surrounding object header in this toXContent, because the snapshot doesn't know the name of the
+        // aggregation it's part of
+        builder.humanReadableField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, new ByteSizeValue(sizeInBytes));
+        builder.field(Fields.EVICTIONS, evictions);
+        builder.field(Fields.HIT_COUNT, hits);
+        builder.field(Fields.MISS_COUNT, misses);
+        builder.field(Fields.ENTRIES, entries);
+        return builder;
+    }
+
+    public static final class Fields {
+        public static final String MEMORY_SIZE = "size"; // TODO: Bad name - think of something better
+        public static final String MEMORY_SIZE_IN_BYTES = "size_in_bytes";
+        // TODO: This might not be memory, as it could be partially on disk, so the name has changed; should it stay consistent
+        // with the earlier field?
+        public static final String EVICTIONS = "evictions";
+        public static final String HIT_COUNT = "hit_count";
+        public static final String MISS_COUNT = "miss_count";
+        public static final String ENTRIES = "entries";
+    }
 }
diff --git a/server/src/main/java/org/opensearch/common/cache/stats/MultiDimensionCacheStats.java b/server/src/main/java/org/opensearch/common/cache/stats/MultiDimensionCacheStats.java
index 3fc5d54b5dcbe..4db418b05f9d0 100644
--- a/server/src/main/java/org/opensearch/common/cache/stats/MultiDimensionCacheStats.java
+++ b/server/src/main/java/org/opensearch/common/cache/stats/MultiDimensionCacheStats.java
@@ -8,9 +8,16 @@
 
 package org.opensearch.common.cache.stats;
 
+import org.opensearch.core.common.io.stream.StreamInput;
+import org.opensearch.core.common.io.stream.StreamOutput;
+import org.opensearch.core.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.TreeMap;
 
 /**
@@ -25,9 +32,92 @@ public class MultiDimensionCacheStats implements CacheStats {
     final MDCSDimensionNode statsRoot;
     final List<String> dimensionNames;
 
-    public MultiDimensionCacheStats(MDCSDimensionNode statsRoot, List<String> dimensionNames) {
+    // The name of the cache type producing these stats. Returned in API response.
+    final String storeName;
+    public static final String STORE_NAME_FIELD = "store_name";
+
+    public static final String CLASS_NAME = "multidimension";
+
+    public MultiDimensionCacheStats(MDCSDimensionNode statsRoot, List<String> dimensionNames, String storeName) {
         this.statsRoot = statsRoot;
         this.dimensionNames = dimensionNames;
+        this.storeName = storeName;
+    }
+
+    /**
+     * Should not be used with StreamOutputs produced using writeToWithClassName.
+     */
+    public MultiDimensionCacheStats(StreamInput in) throws IOException {
+        // Because we write in preorder order, the parent of the next node we read will always be one of the ancestors
+        // of the last node we read. This allows us to avoid ambiguity if nodes have the same dimension value, without
+        // having to serialize the whole path to each node.
+        this.dimensionNames = List.of(in.readStringArray());
+        this.statsRoot = new MDCSDimensionNode("", true);
+        List<MDCSDimensionNode> ancestorsOfLastRead = List.of(statsRoot);
+        while (ancestorsOfLastRead != null) {
+            ancestorsOfLastRead = readAndAttachDimensionNode(in, ancestorsOfLastRead);
+        }
+        // Finally, update sum-of-children stats for the root node
+        CacheStatsCounter totalStats = new CacheStatsCounter();
+        for (MDCSDimensionNode child : statsRoot.children.values()) {
+            totalStats.add(child.getStats());
+        }
+        statsRoot.setStats(totalStats.snapshot());
+        this.storeName = in.readString();
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        // Write each node in preorder order, along with its depth.
+        // Then, when rebuilding the tree from the stream, we can always find the correct parent to attach each node to.
+        out.writeStringArray(dimensionNames.toArray(new String[0]));
+        for (MDCSDimensionNode child : statsRoot.children.values()) {
+            writeDimensionNodeRecursive(out, child, 1);
+        }
+        out.writeBoolean(false); // Write false to signal there are no more nodes
+        out.writeString(storeName);
+    }
+
+    private void writeDimensionNodeRecursive(StreamOutput out, MDCSDimensionNode node, int depth) throws IOException {
+        out.writeBoolean(true); // Signals there is a following node to deserialize
+        out.writeVInt(depth);
+        out.writeString(node.getDimensionValue());
+        node.getStats().writeTo(out);
+
+        if (!node.children.isEmpty()) {
+            // Not a leaf node
+            out.writeBoolean(true); // Write true to indicate we should re-create a map on deserialization
+            for (MDCSDimensionNode child : node.children.values()) {
+                writeDimensionNodeRecursive(out, child, depth + 1);
+            }
+        } else {
+            out.writeBoolean(false); // Write false to indicate we should not re-create a map on deserialization
+        }
+    }
+
+    /**
+     * Reads a serialized dimension node, attaches it to its appropriate place in the tree, and returns the list of
+     * ancestors of the newly attached node.
+     */
+    private List<MDCSDimensionNode> readAndAttachDimensionNode(StreamInput in, List<MDCSDimensionNode> ancestorsOfLastRead)
+        throws IOException {
+        boolean hasNextNode = in.readBoolean();
+        if (hasNextNode) {
+            int depth = in.readVInt();
+            String nodeDimensionValue = in.readString();
+            CacheStatsCounterSnapshot stats = new CacheStatsCounterSnapshot(in);
+            boolean doRecreateMap = in.readBoolean();
+
+            MDCSDimensionNode result = new MDCSDimensionNode(nodeDimensionValue, doRecreateMap, stats);
+            MDCSDimensionNode parent = ancestorsOfLastRead.get(depth - 1);
+            parent.getChildren().put(nodeDimensionValue, result);
+            List<MDCSDimensionNode> ancestors = new ArrayList<>(ancestorsOfLastRead.subList(0, depth));
+            ancestors.add(result);
+            return ancestors;
+        } else {
+            // No more nodes
+            return null;
+        }
+    }
 
     @Override
@@ -60,6 +150,146 @@ public long getTotalEntries() {
         return getTotalStats().getEntries();
     }
 
+    @Override
+    public String getClassName() {
+        return CLASS_NAME;
+    }
+
+    @Override
+    public void writeToWithClassName(StreamOutput out) throws IOException {
+        out.writeString(getClassName());
+        writeTo(out);
+    }
+
+    /**
+     * Returns a new tree containing the stats aggregated by the levels passed in. The root node is a dummy node,
+     * whose name and value are null. The new tree only has dimensions matching the levels passed in.
+     */
+    MDCSDimensionNode aggregateByLevels(List<String> levels) {
+        List<String> filteredLevels = filterLevels(levels);
+        MDCSDimensionNode newRoot = new MDCSDimensionNode("", true, statsRoot.getStats());
+        for (MDCSDimensionNode child : statsRoot.children.values()) {
+            aggregateByLevelsHelper(newRoot, child, filteredLevels, 0);
+        }
+        return newRoot;
+    }
+
+    void aggregateByLevelsHelper(
+        MDCSDimensionNode parentInNewTree,
+        MDCSDimensionNode currentInOriginalTree,
+        List<String> levels,
+        int depth
+    ) {
+        if (levels.contains(dimensionNames.get(depth))) {
+            // If this node is in a level we want to aggregate, create a new dimension node with the same value and stats, and
+            // connect it to the last parent node in the new tree. If it already exists, increment it instead.
+            String dimensionValue = currentInOriginalTree.getDimensionValue();
+            MDCSDimensionNode nodeInNewTree = parentInNewTree.children.get(dimensionValue);
+            if (nodeInNewTree == null) {
+                // Create new node with stats matching the node from the original tree
+                int indexOfLastLevel = dimensionNames.indexOf(levels.get(levels.size() - 1));
+                boolean isLeafNode = depth == indexOfLastLevel; // If this is the last level we aggregate, the new node should be a
+                // leaf node
+                nodeInNewTree = new MDCSDimensionNode(dimensionValue, !isLeafNode, currentInOriginalTree.getStats());
+                parentInNewTree.children.put(dimensionValue, nodeInNewTree);
+            } else {
+                // Otherwise increment existing stats
+                CacheStatsCounterSnapshot newStats = CacheStatsCounterSnapshot.addSnapshots(
+                    nodeInNewTree.getStats(),
+                    currentInOriginalTree.getStats()
+                );
+                nodeInNewTree.setStats(newStats);
+            }
+            // Finally set the parent node to be this node for the next callers of this function
+            parentInNewTree = nodeInNewTree;
+        }
+
+        if (!currentInOriginalTree.children.isEmpty()) {
+            // Not a leaf node
+            for (Map.Entry<String, MDCSDimensionNode> childEntry : currentInOriginalTree.children.entrySet()) {
+                MDCSDimensionNode child = childEntry.getValue();
+                aggregateByLevelsHelper(parentInNewTree, child, levels, depth + 1);
+            }
+        }
+    }
+
+    /**
+     * Filters out levels that aren't in dimensionNames. Unrecognized levels are ignored.
+     */
+    private List<String> filterLevels(List<String> levels) {
+        List<String> filtered = new ArrayList<>();
+        for (String level : levels) {
+            if (dimensionNames.contains(level)) {
+                filtered.add(level);
+            }
+        }
+        if (filtered.isEmpty()) {
+            throw new IllegalArgumentException("Levels cannot have size 0");
+        }
+        return filtered;
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        // Always show total stats, regardless of levels
+        getTotalStats().toXContent(builder, params);
+
+        List<String> levels = getLevels(params);
+        if (levels == null) {
+            // display total stats only
+            return builder;
+        }
+
+        List<String> filteredLevels = filterLevels(levels);
+        toXContentForLevels(builder, params, filteredLevels);
+        // Also add the store name for the cache that produced the stats
+        builder.field(STORE_NAME_FIELD, storeName);
+        return builder;
+    }
+
+    XContentBuilder toXContentForLevels(XContentBuilder builder, Params params, List<String> levels) throws IOException {
+        MDCSDimensionNode aggregated = aggregateByLevels(levels);
+        // Depth -1 corresponds to the dummy root node, which has no dimension value and only has children
+        toXContentForLevelsHelper(-1, aggregated, levels, builder, params);
+        return builder;
+    }
+
+    private void toXContentForLevelsHelper(
+        int depth,
+        MDCSDimensionNode current,
+        List<String> levels,
+        XContentBuilder builder,
+        Params params
+    ) throws IOException {
+        if (depth >= 0) {
+            builder.startObject(current.dimensionValue);
+        }
+
+        if (depth == levels.size() - 1) {
+            // This is a leaf node
+            current.getStats().toXContent(builder, params);
+        } else {
+            builder.startObject(levels.get(depth + 1));
+            for (MDCSDimensionNode nextNode : current.children.values()) {
+                toXContentForLevelsHelper(depth + 1, nextNode, levels, builder, params);
+            }
+            builder.endObject();
+        }
+
+        if (depth >= 0) {
+            builder.endObject();
+        }
+    }
+
+    private List<String> getLevels(Params params) {
+        String levels = params.param("level");
+        if (levels == null) {
+            return null;
+        }
+        return List.of(levels.split(","));
+    }
+
     public CacheStatsCounterSnapshot getStatsForDimensionValues(List<String> dimensionValues) {
         MDCSDimensionNode current = statsRoot;
         for (String dimensionValue : dimensionValues) {
@@ -117,5 +347,45 @@ MDCSDimensionNode getStatsRoot() {
         return statsRoot;
     }
 
-    // TODO (in API PR): Produce XContent based on aggregateByLevels()
+    @Override
+    public boolean equals(Object o) {
+        if (o == null || o.getClass() != MultiDimensionCacheStats.class) {
+            return false;
+        }
+        MultiDimensionCacheStats other = (MultiDimensionCacheStats) o;
+        if (!dimensionNames.equals(other.dimensionNames) || !storeName.equals(other.storeName)) {
+            return false;
+        }
+        return equalsHelper(statsRoot, other.getStatsRoot());
+    }
+
+    private boolean equalsHelper(MDCSDimensionNode thisNode, MDCSDimensionNode otherNode) {
+        if (!thisNode.getStats().equals(otherNode.getStats())) {
+            return false;
+        }
+        if (thisNode.children == null && otherNode.children == null) {
+            // TODO: Simplify this logic once we inherit from normal DimensionNode and have the static empty map thing
+            return true;
+        }
+        if (thisNode.children == null
+            || otherNode.children == null
+            || !thisNode.getChildren().keySet().equals(otherNode.getChildren().keySet())) {
+            return false;
+        }
+        boolean allChildrenMatch = true;
+        for (String childValue : thisNode.getChildren().keySet()) {
+            allChildrenMatch = equalsHelper(thisNode.children.get(childValue), otherNode.children.get(childValue));
+            if (!allChildrenMatch) {
+                return false;
+            }
+        }
+        return allChildrenMatch;
+    }
+
+    @Override
+    public int hashCode() {
+        // Should be sufficient to hash based on the total stats value (found in the root node)
+        return Objects.hash(statsRoot.stats, dimensionNames);
+    }
+
 }
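Note: to make the preorder-plus-depth encoding above concrete, here is a hand-worked trace with made-up dimension values, for dimensionNames = ["index", "shard"] and a tree index1 -> {shard0, shard1}, index2 -> {shard0}. writeTo emits, in order:

    ["index", "shard"]                        // dimension names
    true, 1, "index1", <stats>, true          // depth 1, has children
    true, 2, "shard0", <stats>, false         // depth 2, leaf
    true, 2, "shard1", <stats>, false
    true, 1, "index2", <stats>, true
    true, 2, "shard0", <stats>, false
    false                                     // no more nodes
    "<store name>"

On read, a node at depth d is attached to ancestorsOfLastRead.get(d - 1), which is why the two "shard0" nodes under different parents cannot be confused; the root's totals are not serialized and are instead recomputed from its children.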
diff --git a/server/src/main/java/org/opensearch/common/cache/stats/StatsHolder.java b/server/src/main/java/org/opensearch/common/cache/stats/StatsHolder.java
index 09174055770da..b1be4baee348c 100644
--- a/server/src/main/java/org/opensearch/common/cache/stats/StatsHolder.java
+++ b/server/src/main/java/org/opensearch/common/cache/stats/StatsHolder.java
@@ -8,6 +8,8 @@
 
 package org.opensearch.common.cache.stats;
 
+import org.opensearch.common.annotation.ExperimentalApi;
+
 import java.util.Collections;
 import java.util.List;
 import java.util.concurrent.locks.Lock;
@@ -26,6 +28,7 @@
  *
  * @opensearch.experimental
  */
+@ExperimentalApi
 public class StatsHolder {
 
     // The list of permitted dimensions. Should be ordered from "outermost" to "innermost", as you would like to
@@ -40,8 +43,12 @@ public class StatsHolder {
     // No lock is needed to edit stats on existing nodes.
     private final Lock lock = new ReentrantLock();
 
-    public StatsHolder(List<String> dimensionNames) {
+    // The name of the cache type using these stats
+    private final String storeName;
+
+    public StatsHolder(List<String> dimensionNames, String storeName) {
         this.dimensionNames = Collections.unmodifiableList(dimensionNames);
+        this.storeName = storeName;
         this.statsRoot = new DimensionNode("", true); // The root node has the empty string as its dimension value
     }
 
@@ -162,7 +169,7 @@ public CacheStats getCacheStats() {
                 getCacheStatsHelper(child, snapshot);
             }
         }
-        return new MultiDimensionCacheStats(snapshot, dimensionNames);
+        return new MultiDimensionCacheStats(snapshot, dimensionNames, storeName);
     }
 
     private void getCacheStatsHelper(DimensionNode currentNodeInOriginalTree, MDCSDimensionNode parentInNewTree) {
diff --git a/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java
index 2e60072d07ed2..2a68d83456ace 100644
--- a/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java
+++ b/server/src/main/java/org/opensearch/common/cache/store/OpenSearchOnHeapCache.java
@@ -62,7 +62,7 @@ public OpenSearchOnHeapCache(Builder<K, V> builder) {
         }
         cache = cacheBuilder.build();
         this.dimensionNames = Objects.requireNonNull(builder.dimensionNames, "Dimension names can't be null");
-        this.statsHolder = new StatsHolder(dimensionNames);
+        this.statsHolder = new StatsHolder(dimensionNames, OpenSearchOnHeapCacheFactory.NAME);
         this.removalListener = builder.getRemovalListener();
         this.weigher = builder.getWeigher();
     }
diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java
index 7fa2b6c8ff497..628381beda3f9 100644
--- a/server/src/main/java/org/opensearch/node/Node.java
+++ b/server/src/main/java/org/opensearch/node/Node.java
@@ -1180,7 +1180,8 @@ protected Node(
                 resourceUsageCollectorService,
                 segmentReplicationStatsTracker,
                 repositoryService,
-                admissionControlService
+                admissionControlService,
+                cacheService
             );
 
             final SearchService searchService = newSearchService(
diff --git a/server/src/main/java/org/opensearch/node/NodeService.java b/server/src/main/java/org/opensearch/node/NodeService.java
index 15cc8f3d20bb3..1eb38ea63ad5a 100644
--- a/server/src/main/java/org/opensearch/node/NodeService.java
+++ b/server/src/main/java/org/opensearch/node/NodeService.java
@@ -41,6 +41,7 @@
 import org.opensearch.cluster.routing.WeightedRoutingStats;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.Nullable;
+import org.opensearch.common.cache.service.CacheService;
 import org.opensearch.common.settings.Settings;
 import org.opensearch.common.settings.SettingsFilter;
 import org.opensearch.common.util.io.IOUtils;
@@ -99,6 +100,7 @@ public class NodeService implements Closeable {
     private final RepositoriesService repositoriesService;
     private final AdmissionControlService admissionControlService;
     private final SegmentReplicationStatsTracker segmentReplicationStatsTracker;
+    private final CacheService cacheService;
 
     NodeService(
         Settings settings,
@@ -125,7 +127,8 @@ public class NodeService implements Closeable {
         ResourceUsageCollectorService resourceUsageCollectorService,
         SegmentReplicationStatsTracker segmentReplicationStatsTracker,
         RepositoriesService repositoriesService,
-        AdmissionControlService admissionControlService
+        AdmissionControlService admissionControlService,
+        CacheService cacheService
     ) {
         this.settings = settings;
         this.threadPool = threadPool;
@@ -154,6 +157,7 @@ public class NodeService implements Closeable {
         clusterService.addStateApplier(ingestService);
         clusterService.addStateApplier(searchPipelineService);
         this.segmentReplicationStatsTracker = segmentReplicationStatsTracker;
+        this.cacheService = cacheService;
     }
 
     public NodeInfo info(
@@ -236,7 +240,8 @@ public NodeStats stats(
         boolean resourceUsageStats,
         boolean segmentReplicationTrackerStats,
         boolean repositoriesStats,
-        boolean admissionControl
+        boolean admissionControl,
+        boolean cacheService
     ) {
         // for indices stats we want to include previous allocated shards stats as well (it will
         // only be applied to the sensible ones to use, like refresh/merge/flush/indexing stats)
@@ -268,7 +273,8 @@ public NodeStats stats(
             searchPipelineStats ? this.searchPipelineService.stats() : null,
             segmentReplicationTrackerStats ? this.segmentReplicationStatsTracker.getTotalRejectionStats() : null,
             repositoriesStats ? this.repositoriesService.getRepositoriesStats() : null,
-            admissionControl ? this.admissionControlService.stats() : null
+            admissionControl ? this.admissionControlService.stats() : null,
+            cacheService ? this.cacheService.stats(indices) : null
         );
     }
diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsAction.java
index 66b9afda06eb6..f62eaeb37f41f 100644
--- a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsAction.java
+++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestNodesStatsAction.java
@@ -36,6 +36,7 @@
 import org.opensearch.action.admin.indices.stats.CommonStatsFlags;
 import org.opensearch.action.admin.indices.stats.CommonStatsFlags.Flag;
 import org.opensearch.client.node.NodeClient;
+import org.opensearch.common.cache.CacheType;
 import org.opensearch.core.common.Strings;
 import org.opensearch.rest.BaseRestHandler;
 import org.opensearch.rest.RestRequest;
@@ -175,6 +176,25 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC
                 nodesStatsRequest.indices(flags);
             }
+        } else if (metrics.contains("caches")) {
+            // Extract the list of caches we want to get stats for from the submetrics (which we get from index_metric)
+            Set<String> cacheMetrics = Strings.tokenizeByCommaToSet(request.param("index_metric", "_all"));
+            CommonStatsFlags cacheFlags = new CommonStatsFlags();
+            cacheFlags.clear();
+            if (cacheMetrics.size() == 1 && cacheMetrics.contains("_all")) {
+                cacheFlags.includeAllCacheTypes();
+            } else {
+                for (String cacheName : cacheMetrics) {
+                    try {
+                        cacheFlags.includeCacheType(CacheType.getByRepresentation(cacheName));
+                    } catch (IllegalArgumentException e) {
+                        throw new IllegalArgumentException(
+                            unrecognized(request, Set.of(cacheName), CacheType.allRepresentations(), "cache type")
+                        );
+                    }
+                }
+            }
+            nodesStatsRequest.indices(cacheFlags);
         } else if (request.hasParam("index_metric")) {
             throw new IllegalArgumentException(
                 String.format(
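Note: with the handler change above, the new metric should be reachable through the usual nodes-stats URL shapes. The sketch below is illustrative; "request_cache" comes from CacheType.allRepresentations(), while the exact level values are whatever dimension names the request cache registers (IndicesRequestCache.INDEX_DIMENSION_NAME and SHARD_ID_DIMENSION_NAME, assumed here to be "indices" and "shards"):

    GET _nodes/stats/caches                              # every cache type (_all)
    GET _nodes/stats/caches/request_cache                # totals for one cache
    GET _nodes/stats/caches/request_cache?level=indices  # aggregated per index

Requesting an unknown submetric, e.g. _nodes/stats/caches/typo_cache, fails with the unrecognized-parameter message built from CacheType.allRepresentations().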
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java
index 1b8b6243aa805..146a77dfa856e 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/stats/NodeStatsTests.java
@@ -42,6 +42,10 @@
 import org.opensearch.cluster.routing.WeightedRoutingStats;
 import org.opensearch.cluster.service.ClusterManagerThrottlingStats;
 import org.opensearch.cluster.service.ClusterStateStats;
+import org.opensearch.common.cache.CacheType;
+import org.opensearch.common.cache.service.NodeCacheStats;
+import org.opensearch.common.cache.stats.CacheStats;
+import org.opensearch.common.cache.stats.StatsHolder;
 import org.opensearch.common.io.stream.BytesStreamOutput;
 import org.opensearch.common.metrics.OperationStats;
 import org.opensearch.common.settings.ClusterSettings;
@@ -577,6 +581,13 @@ public void testSerialization() throws IOException {
                 deserializedAdmissionControllerStats.getRejectionCount().get(AdmissionControlActionType.INDEXING.getType())
             );
         }
+        NodeCacheStats nodeCacheStats = nodeStats.getNodeCacheStats();
+        NodeCacheStats deserializedNodeCacheStats = deserializedNodeStats.getNodeCacheStats();
+        if (nodeCacheStats == null) {
+            assertNull(deserializedNodeCacheStats);
+        } else {
+            assertEquals(nodeCacheStats, deserializedNodeCacheStats);
+        }
         }
     }
 }
@@ -928,6 +939,48 @@ public void apply(String action, AdmissionControlActionType admissionControlActi
 
         NodeIndicesStats indicesStats = getNodeIndicesStats(remoteStoreStats);
 
+        NodeCacheStats nodeCacheStats = null;
+        if (frequently()) {
+            int numIndices = randomIntBetween(1, 10);
+            int numShardsPerIndex = randomIntBetween(1, 50);
+            // Map snapshotMap = new HashMap<>();
+
+            List<String> dimensionNames = List.of("index", "shard", "tier");
+            StatsHolder statsHolder = new StatsHolder(dimensionNames, "dummyStoreName");
+            for (int indexNum = 0; indexNum < numIndices; indexNum++) {
+                String indexName = "index" + indexNum;
+                for (int shardNum = 0; shardNum < numShardsPerIndex; shardNum++) {
+                    String shardName = "[" + indexName + "][" + shardNum + "]";
+                    for (String tierName : new String[] { "dummy_tier_1", "dummy_tier_2" }) {
+                        List<String> dimensionValues = List.of(indexName, shardName, tierName);
+                        for (int i = 0; i < randomInt(20); i++) {
+                            statsHolder.incrementHits(dimensionValues);
+                        }
+                        for (int i = 0; i < randomInt(20); i++) {
+                            statsHolder.incrementMisses(dimensionValues);
+                        }
+                        for (int i = 0; i < randomInt(20); i++) {
+                            statsHolder.incrementEvictions(dimensionValues);
+                        }
+                        statsHolder.incrementSizeInBytes(dimensionValues, randomInt(20));
+                        for (int i = 0; i < randomInt(20); i++) {
+                            statsHolder.incrementEntries(dimensionValues);
+                        }
+                    }
+                }
+            }
+            CommonStatsFlags flags = new CommonStatsFlags();
+            for (CacheType cacheType : CacheType.values()) {
+                if (frequently()) {
+                    flags.includeCacheType(cacheType);
+                }
+            }
+            CacheStats cacheStats = statsHolder.getCacheStats();
+            Map<CacheType, CacheStats> cacheStatsMap = new HashMap<>();
+            cacheStatsMap.put(CacheType.INDICES_REQUEST_CACHE, cacheStats);
+            nodeCacheStats = new NodeCacheStats(cacheStatsMap, flags);
+        }
+
         // TODO: Only remote_store based aspects of NodeIndicesStats are being tested here.
         // It is possible to test other metrics in NodeIndicesStats as well since it extends Writeable now
         return new NodeStats(
@@ -958,7 +1011,8 @@ public void apply(String action, AdmissionControlActionType admissionControlActi
             null,
             segmentReplicationRejectionStats,
             null,
-            admissionControlStats
+            admissionControlStats,
+            nodeCacheStats
         );
     }
diff --git a/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java b/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java
index ff47ec3015697..5539dd26dd52d 100644
--- a/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java
+++ b/server/src/test/java/org/opensearch/cluster/DiskUsageTests.java
@@ -194,6 +194,7 @@ public void testFillDiskUsage() {
                 null,
                 null,
                 null,
+                null,
                 null
             ),
             new NodeStats(
@@ -224,6 +225,7 @@ public void testFillDiskUsage() {
                 null,
                 null,
                 null,
+                null,
                 null
             ),
             new NodeStats(
@@ -254,6 +256,7 @@ public void testFillDiskUsage() {
                 null,
                 null,
                 null,
+                null,
                 null
             )
         );
@@ -315,6 +318,7 @@ public void testFillDiskUsageSomeInvalidValues() {
                 null,
                 null,
                 null,
+                null,
                 null
             ),
             new NodeStats(
@@ -345,6 +349,7 @@ public void testFillDiskUsageSomeInvalidValues() {
                 null,
                 null,
                 null,
+                null,
                 null
             ),
             new NodeStats(
@@ -375,6 +380,7 @@ public void testFillDiskUsageSomeInvalidValues() {
                 null,
                 null,
                 null,
+                null,
                 null
             )
         );
diff --git a/server/src/test/java/org/opensearch/common/cache/stats/MultiDimensionCacheStatsTests.java b/server/src/test/java/org/opensearch/common/cache/stats/MultiDimensionCacheStatsTests.java
index 460398961d94f..1a221f8393474 100644
--- a/server/src/test/java/org/opensearch/common/cache/stats/MultiDimensionCacheStatsTests.java
+++ b/server/src/test/java/org/opensearch/common/cache/stats/MultiDimensionCacheStatsTests.java
@@ -8,16 +8,71 @@
 
 package org.opensearch.common.cache.stats;
 
+import org.opensearch.common.io.stream.BytesStreamOutput;
+import org.opensearch.common.xcontent.XContentFactory;
+import org.opensearch.common.xcontent.XContentHelper;
+import org.opensearch.core.common.bytes.BytesReference;
+import org.opensearch.core.common.io.stream.BytesStreamInput;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
+import org.opensearch.core.xcontent.ToXContent;
+import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.test.OpenSearchTestCase;
 
+import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.function.BiConsumer;
 
 public class MultiDimensionCacheStatsTests extends OpenSearchTestCase {
+    private final String storeName = "dummy_store";
+
+    public void testSerialization() throws Exception {
+        List<String> dimensionNames = List.of("dim1", "dim2", "dim3");
+        StatsHolder statsHolder = new StatsHolder(dimensionNames, storeName);
+        Map<String, List<String>> usedDimensionValues = StatsHolderTests.getUsedDimensionValues(statsHolder, 10);
+        StatsHolderTests.populateStats(statsHolder, usedDimensionValues, 100, 10);
+        MultiDimensionCacheStats stats = (MultiDimensionCacheStats) statsHolder.getCacheStats();
+
+        BytesStreamOutput os = new BytesStreamOutput();
+        stats.writeTo(os);
+        BytesStreamInput is = new BytesStreamInput(BytesReference.toBytes(os.bytes()));
+        MultiDimensionCacheStats deserialized = new MultiDimensionCacheStats(is);
+
+        assertEquals(stats.dimensionNames, deserialized.dimensionNames);
+        assertEquals(stats.storeName, deserialized.storeName);
+
+        os = new BytesStreamOutput();
+        stats.writeToWithClassName(os);
+        is = new BytesStreamInput(BytesReference.toBytes(os.bytes()));
+        CacheStats deserializedViaCacheStats = CacheStats.readFromStreamWithClassName(is);
+        assertEquals(MultiDimensionCacheStats.class, deserializedViaCacheStats.getClass());
+
+        assertEquals(stats, deserialized);
+        assertEquals(stats, deserializedViaCacheStats);
+    }
+
+    public void testEquals() throws Exception {
+        List<String> dimensionNames = List.of("dim1", "dim2", "dim3");
+        StatsHolder statsHolder = new StatsHolder(dimensionNames, storeName);
+        StatsHolder differentStoreNameStatsHolder = new StatsHolder(dimensionNames, "nonMatchingStoreName");
+        StatsHolder nonMatchingStatsHolder = new StatsHolder(dimensionNames, storeName);
+        Map<String, List<String>> usedDimensionValues = StatsHolderTests.getUsedDimensionValues(statsHolder, 10);
+        StatsHolderTests.populateStats(List.of(statsHolder, differentStoreNameStatsHolder), usedDimensionValues, 100, 10);
+        StatsHolderTests.populateStats(nonMatchingStatsHolder, usedDimensionValues, 100, 10);
+        MultiDimensionCacheStats stats = (MultiDimensionCacheStats) statsHolder.getCacheStats();
+
+        MultiDimensionCacheStats secondStats = (MultiDimensionCacheStats) statsHolder.getCacheStats();
+        assertEquals(stats, secondStats);
+        MultiDimensionCacheStats nonMatchingStats = (MultiDimensionCacheStats) nonMatchingStatsHolder.getCacheStats();
+        assertNotEquals(stats, nonMatchingStats);
+        MultiDimensionCacheStats differentStoreNameStats = (MultiDimensionCacheStats) differentStoreNameStatsHolder.getCacheStats();
+        assertNotEquals(stats, differentStoreNameStats);
+    }
 
     public void testGet() throws Exception {
         List<String> dimensionNames = List.of("dim1", "dim2", "dim3", "dim4");
-        StatsHolder statsHolder = new StatsHolder(dimensionNames);
+        StatsHolder statsHolder = new StatsHolder(dimensionNames, storeName);
         Map<String, List<String>> usedDimensionValues = StatsHolderTests.getUsedDimensionValues(statsHolder, 10);
         Map<List<String>, CacheStatsCounter> expected = StatsHolderTests.populateStats(statsHolder, usedDimensionValues, 1000, 10);
         MultiDimensionCacheStats stats = (MultiDimensionCacheStats) statsHolder.getCacheStats();
@@ -52,7 +107,7 @@ public void testGet() throws Exception {
 
     public void testEmptyDimsList() throws Exception {
         // If the dimension list is empty, the tree should have only the root node containing the total stats.
+        List<String> dimensionNames = List.of("dim1", "dim2", "dim3", "dim4");
+        StatsHolder statsHolder = new StatsHolder(dimensionNames, storeName);
+        Map<String, List<String>> usedDimensionValues = StatsHolderTests.getUsedDimensionValues(statsHolder, 10);
+        Map<List<String>, CacheStatsCounter> expected = StatsHolderTests.populateStats(statsHolder, usedDimensionValues, 1000, 10);
+        MultiDimensionCacheStats stats = (MultiDimensionCacheStats) statsHolder.getCacheStats();
+
+        MultiDimensionCacheStats.MDCSDimensionNode aggregated = stats.aggregateByLevels(dimensionNames);
+        for (Map.Entry<List<String>, CacheStatsCounter> expectedEntry : expected.entrySet()) {
+            List<String> dimensionValues = new ArrayList<>();
+            for (String dimValue : expectedEntry.getKey()) {
+                dimensionValues.add(dimValue);
+            }
+            assertEquals(expectedEntry.getValue().snapshot(), getNode(dimensionValues, aggregated).getStats());
+        }
+        assertSumOfChildrenStats(aggregated);
+    }
+
+    public void testAggregateBySomeDimensions() throws Exception {
+        List<String> dimensionNames = List.of("dim1", "dim2", "dim3", "dim4");
+        StatsHolder statsHolder = new StatsHolder(dimensionNames, storeName);
+        Map<String, List<String>> usedDimensionValues = StatsHolderTests.getUsedDimensionValues(statsHolder, 10);
+        Map<List<String>, CacheStatsCounter> expected = StatsHolderTests.populateStats(statsHolder, usedDimensionValues, 1000, 10);
+        MultiDimensionCacheStats stats = (MultiDimensionCacheStats) statsHolder.getCacheStats();
+
+        for (int i = 0; i < (1 << dimensionNames.size()); i++) {
+            // Test each combination of possible levels: bit nameIndex of i decides whether dimensionNames.get(nameIndex) is included.
+            List<String> levels = new ArrayList<>();
+            for (int nameIndex = 0; nameIndex < dimensionNames.size(); nameIndex++) {
+                if ((i & (1 << nameIndex)) != 0) {
+                    levels.add(dimensionNames.get(nameIndex));
+                }
+            }
+            if (levels.size() == 0) {
+                assertThrows(IllegalArgumentException.class, () -> stats.aggregateByLevels(levels));
+            } else {
+                MultiDimensionCacheStats.MDCSDimensionNode aggregated = stats.aggregateByLevels(levels);
+                Map<List<String>, MultiDimensionCacheStats.MDCSDimensionNode> aggregatedLeafNodes = getAllLeafNodes(aggregated);
+
+                for (Map.Entry<List<String>, MultiDimensionCacheStats.MDCSDimensionNode> aggEntry : aggregatedLeafNodes.entrySet()) {
+                    CacheStatsCounter expectedCounter = new CacheStatsCounter();
+                    for (List<String> expectedDims : expected.keySet()) {
+                        if (expectedDims.containsAll(aggEntry.getKey())) {
+                            expectedCounter.add(expected.get(expectedDims));
+                        }
+                    }
+                    assertEquals(expectedCounter.snapshot(), aggEntry.getValue().getStats());
+                }
+                assertSumOfChildrenStats(aggregated);
+            }
+        }
+    }
+
+    public void testXContentForLevels() throws Exception {
+        List<String> dimensionNames = List.of("A", "B", "C");
+
+        StatsHolder statsHolder = new StatsHolder(dimensionNames, storeName);
+        StatsHolderTests.populateStatsHolderFromStatsValueMap(
+            statsHolder,
+            Map.of(
+                List.of("A1", "B1", "C1"),
+                new CacheStatsCounter(1, 1, 1, 1, 1),
+                List.of("A1", "B1", "C2"),
+                new CacheStatsCounter(2, 2, 2, 2, 2),
+                List.of("A1", "B2", "C1"),
+                new CacheStatsCounter(3, 3, 3, 3, 3),
+                List.of("A2", "B1", "C3"),
+                new CacheStatsCounter(4, 4, 4, 4, 4)
+            )
+        );
+        MultiDimensionCacheStats stats = (MultiDimensionCacheStats) statsHolder.getCacheStats();
+
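+        // Render the stats for levels A, B, C into JSON, then parse the JSON back into a nested map so each leaf's values can be checked below.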
+        XContentBuilder builder = XContentFactory.jsonBuilder();
+        ToXContent.Params params = ToXContent.EMPTY_PARAMS;
+
+        builder.startObject();
+        stats.toXContentForLevels(builder, params, List.of("A", "B", "C"));
+        builder.endObject();
+        String resultString = builder.toString();
+        Map<String, Object> result = XContentHelper.convertToMap(MediaTypeRegistry.JSON.xContent(), resultString, true);
+
+        Map<String, BiConsumer<CacheStatsCounter, Integer>> fieldNamesMap = Map.of(
+            CacheStatsCounterSnapshot.Fields.MEMORY_SIZE_IN_BYTES,
+            (counter, value) -> counter.sizeInBytes.inc(value),
+            CacheStatsCounterSnapshot.Fields.EVICTIONS,
+            (counter, value) -> counter.evictions.inc(value),
+            CacheStatsCounterSnapshot.Fields.HIT_COUNT,
+            (counter, value) -> counter.hits.inc(value),
+            CacheStatsCounterSnapshot.Fields.MISS_COUNT,
+            (counter, value) -> counter.misses.inc(value),
+            CacheStatsCounterSnapshot.Fields.ENTRIES,
+            (counter, value) -> counter.entries.inc(value)
+        );
+
+        Map<List<String>, MultiDimensionCacheStats.MDCSDimensionNode> leafNodes = getAllLeafNodes(stats.getStatsRoot());
+        for (Map.Entry<List<String>, MultiDimensionCacheStats.MDCSDimensionNode> entry : leafNodes.entrySet()) {
+            List<String> xContentKeys = new ArrayList<>();
+            for (int i = 0; i < dimensionNames.size(); i++) {
+                xContentKeys.add(dimensionNames.get(i));
+                xContentKeys.add(entry.getKey().get(i));
+            }
+            CacheStatsCounter counterFromXContent = new CacheStatsCounter();
+
+            for (Map.Entry<String, BiConsumer<CacheStatsCounter, Integer>> fieldNamesEntry : fieldNamesMap.entrySet()) {
+                List<String> fullXContentKeys = new ArrayList<>(xContentKeys);
+                fullXContentKeys.add(fieldNamesEntry.getKey());
+                int valueInXContent = (int) getValueFromNestedXContentMap(result, fullXContentKeys);
+                BiConsumer<CacheStatsCounter, Integer> incrementer = fieldNamesEntry.getValue();
+                incrementer.accept(counterFromXContent, valueInXContent);
+            }
+
+            CacheStatsCounterSnapshot expected = entry.getValue().getStats();
+            assertEquals(counterFromXContent.snapshot(), expected);
+        }
+    }
+
+    public static Object getValueFromNestedXContentMap(Map<String, Object> xContentMap, List<String> keys) {
+        Map<String, Object> current = xContentMap;
+        for (int i = 0; i < keys.size() - 1; i++) {
+            Object next = current.get(keys.get(i));
+            if (next == null) {
+                return null;
+            }
+            current = (Map<String, Object>) next;
+        }
+        return current.get(keys.get(keys.size() - 1));
+    }
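+    // Example with hypothetical keys: getValueFromNestedXContentMap(result, List.of("indices", "index1", "hit_count"))
+    // descends through result.get("indices"), then .get("index1"), and returns the value under "hit_count",
+    // or null if an intermediate key is missing.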
+
+    // Get a map from the list of dimension values to the corresponding leaf node.
+    private Map<List<String>, MultiDimensionCacheStats.MDCSDimensionNode> getAllLeafNodes(MultiDimensionCacheStats.MDCSDimensionNode root) {
+        Map<List<String>, MultiDimensionCacheStats.MDCSDimensionNode> result = new HashMap<>();
+        getAllLeafNodesHelper(result, root, new ArrayList<>());
+        return result;
+    }
+
+    private void getAllLeafNodesHelper(
+        Map<List<String>, MultiDimensionCacheStats.MDCSDimensionNode> result,
+        MultiDimensionCacheStats.MDCSDimensionNode current,
+        List<String> pathToCurrent
+    ) {
+        if (current.children.isEmpty()) {
+            result.put(pathToCurrent, current);
+        } else {
+            for (Map.Entry<String, MultiDimensionCacheStats.MDCSDimensionNode> entry : current.children.entrySet()) {
+                List<String> newPath = new ArrayList<>(pathToCurrent);
+                newPath.add(entry.getKey());
+                getAllLeafNodesHelper(result, entry.getValue(), newPath);
+            }
+        }
+    }
+
     private MultiDimensionCacheStats.MDCSDimensionNode getNode(
         List<String> dimensionValues,
         MultiDimensionCacheStats.MDCSDimensionNode root
diff --git a/server/src/test/java/org/opensearch/common/cache/stats/StatsHolderTests.java b/server/src/test/java/org/opensearch/common/cache/stats/StatsHolderTests.java
index d351572e05d74..408d9701c3fb6 100644
--- a/server/src/test/java/org/opensearch/common/cache/stats/StatsHolderTests.java
+++ b/server/src/test/java/org/opensearch/common/cache/stats/StatsHolderTests.java
@@ -22,9 +22,11 @@
 import java.util.concurrent.CountDownLatch;
 
 public class StatsHolderTests extends OpenSearchTestCase {
+    private final String storeName = "dummy_store";
+
     public void testAddAndGet() throws Exception {
         List<String> dimensionNames = List.of("dim1", "dim2", "dim3", "dim4");
-        StatsHolder statsHolder = new StatsHolder(dimensionNames);
+        StatsHolder statsHolder = new StatsHolder(dimensionNames, storeName);
         Map<String, List<String>> usedDimensionValues = StatsHolderTests.getUsedDimensionValues(statsHolder, 10);
         Map<List<String>, CacheStatsCounter> expected = StatsHolderTests.populateStats(statsHolder, usedDimensionValues, 1000, 10);
 
@@ -53,7 +55,7 @@ public void testAddAndGet() throws Exception {
 
     public void testReset() throws Exception {
         List<String> dimensionNames = List.of("dim1", "dim2");
-        StatsHolder statsHolder = new StatsHolder(dimensionNames);
+        StatsHolder statsHolder = new StatsHolder(dimensionNames, storeName);
         Map<String, List<String>> usedDimensionValues = getUsedDimensionValues(statsHolder, 10);
         Map<List<String>, CacheStatsCounter> expected = populateStats(statsHolder, usedDimensionValues, 100, 10);
 
@@ -72,7 +74,7 @@ public void testReset() throws Exception {
 
     public void testDropStatsForDimensions() throws Exception {
         List<String> dimensionNames = List.of("dim1", "dim2");
-        StatsHolder statsHolder = new StatsHolder(dimensionNames);
+        StatsHolder statsHolder = new StatsHolder(dimensionNames, storeName);
 
         // Create stats for the following dimension sets
         List<List<String>> populatedStats = List.of(List.of("A1", "B1"), List.of("A2", "B2"), List.of("A2", "B3"));
@@ -108,7 +110,7 @@ public void testDropStatsForDimensions() throws Exception {
 
     public void testCount() throws Exception {
         List<String> dimensionNames = List.of("dim1", "dim2");
-        StatsHolder statsHolder = new StatsHolder(dimensionNames);
+        StatsHolder statsHolder = new StatsHolder(dimensionNames, storeName);
         Map<String, List<String>> usedDimensionValues = getUsedDimensionValues(statsHolder, 10);
         Map<List<String>, CacheStatsCounter> expected = populateStats(statsHolder, usedDimensionValues, 100, 10);
 
@@ -121,7 +123,7 @@ public void testCount() throws Exception {
 
     public void testConcurrentRemoval() throws Exception {
         List<String> dimensionNames = List.of("dim1", "dim2");
-        StatsHolder statsHolder = new StatsHolder(dimensionNames);
+        StatsHolder statsHolder = new StatsHolder(dimensionNames, storeName);
 
         // Create stats for the following dimension sets
         List<List<String>> populatedStats = List.of(List.of("A1", "B1"), List.of("A2", "B2"), List.of("A2", "B3"));
@@ -185,33 +187,48 @@ static Map<List<String>, CacheStatsCounter> populateStats(
         Map<String, List<String>> usedDimensionValues,
         int numDistinctValuePairs,
         int numRepetitionsPerValue
+    ) throws InterruptedException {
+        return populateStats(List.of(statsHolder), usedDimensionValues, numDistinctValuePairs, numRepetitionsPerValue);
+    }
+
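+    // Applies randomized values to every given StatsHolder, using one shared set of dimension value combinations.
+    // The returned map accumulates the expected totals across all holders, which must share the same dimension names.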
+    static Map<List<String>, CacheStatsCounter> populateStats(
+        List<StatsHolder> statsHolders,
+        Map<String, List<String>> usedDimensionValues,
+        int numDistinctValuePairs,
+        int numRepetitionsPerValue
     ) throws InterruptedException {
         Map<List<String>, CacheStatsCounter> expected = new ConcurrentHashMap<>();
+
+        for (StatsHolder statsHolder : statsHolders) {
+            assertEquals(statsHolders.get(0).getDimensionNames(), statsHolder.getDimensionNames());
+        }
         Thread[] threads = new Thread[numDistinctValuePairs];
         CountDownLatch countDownLatch = new CountDownLatch(numDistinctValuePairs);
         Random rand = Randomness.get();
         List<List<String>> dimensionsForThreads = new ArrayList<>();
         for (int i = 0; i < numDistinctValuePairs; i++) {
-            dimensionsForThreads.add(getRandomDimList(statsHolder.getDimensionNames(), usedDimensionValues, true, rand));
+            dimensionsForThreads.add(getRandomDimList(statsHolders.get(0).getDimensionNames(), usedDimensionValues, true, rand));
             int finalI = i;
             threads[i] = new Thread(() -> {
                 Random threadRand = Randomness.get();
                 List<String> dimensions = dimensionsForThreads.get(finalI);
                 expected.computeIfAbsent(dimensions, (key) -> new CacheStatsCounter());
-                for (int j = 0; j < numRepetitionsPerValue; j++) {
-                    CacheStatsCounter statsToInc = new CacheStatsCounter(
-                        threadRand.nextInt(10),
-                        threadRand.nextInt(10),
-                        threadRand.nextInt(10),
-                        threadRand.nextInt(5000),
-                        threadRand.nextInt(10)
-                    );
-                    expected.get(dimensions).hits.inc(statsToInc.getHits());
-                    expected.get(dimensions).misses.inc(statsToInc.getMisses());
-                    expected.get(dimensions).evictions.inc(statsToInc.getEvictions());
-                    expected.get(dimensions).sizeInBytes.inc(statsToInc.getSizeInBytes());
-                    expected.get(dimensions).entries.inc(statsToInc.getEntries());
-                    StatsHolderTests.populateStatsHolderFromStatsValueMap(statsHolder, Map.of(dimensions, statsToInc));
+                for (StatsHolder statsHolder : statsHolders) {
+                    for (int j = 0; j < numRepetitionsPerValue; j++) {
+                        CacheStatsCounter statsToInc = new CacheStatsCounter(
+                            threadRand.nextInt(10),
+                            threadRand.nextInt(10),
+                            threadRand.nextInt(10),
+                            threadRand.nextInt(5000),
+                            threadRand.nextInt(10)
+                        );
+                        expected.get(dimensions).hits.inc(statsToInc.getHits());
+                        expected.get(dimensions).misses.inc(statsToInc.getMisses());
+                        expected.get(dimensions).evictions.inc(statsToInc.getEvictions());
+                        expected.get(dimensions).sizeInBytes.inc(statsToInc.getSizeInBytes());
+                        expected.get(dimensions).entries.inc(statsToInc.getEntries());
+                        StatsHolderTests.populateStatsHolderFromStatsValueMap(statsHolder, Map.of(dimensions, statsToInc));
+                    }
                 }
                 countDownLatch.countDown();
             });
diff --git a/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java b/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java
index 8b667e86d155c..19f2bc565348f 100644
--- a/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java
+++ b/server/src/test/java/org/opensearch/common/cache/store/OpenSearchOnHeapCacheTests.java
@@ -15,16 +15,24 @@
 import org.opensearch.common.cache.LoadAwareCacheLoader;
 import org.opensearch.common.cache.RemovalListener;
 import org.opensearch.common.cache.RemovalNotification;
+import org.opensearch.common.cache.stats.CacheStats;
 import org.opensearch.common.cache.stats.CacheStatsCounterSnapshot;
 import org.opensearch.common.cache.stats.MultiDimensionCacheStats;
 import org.opensearch.common.cache.store.config.CacheConfig;
 import org.opensearch.common.cache.store.settings.OpenSearchOnHeapCacheSettings;
 import org.opensearch.common.metrics.CounterMetric;
 import org.opensearch.common.settings.Settings;
+import org.opensearch.common.xcontent.XContentFactory;
+import org.opensearch.common.xcontent.XContentHelper;
+import org.opensearch.core.xcontent.MediaTypeRegistry;
+import org.opensearch.core.xcontent.ToXContent;
+import org.opensearch.core.xcontent.XContentBuilder;
 import org.opensearch.test.OpenSearchTestCase;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Map;
 import java.util.Random;
 import java.util.UUID;
 
@@ -155,6 +163,20 @@ public void onRemoval(RemovalNotification<ICacheKey<K>, V> notification) {
         }
     }
 
+    // Public as this is used in other tests as well
+    public static Map<String, Object> getStatsXContentMap(CacheStats cacheStats, List<String> levels) throws IOException {
+        XContentBuilder builder = XContentFactory.jsonBuilder();
+        Map<String, String> paramMap = Map.of("level", String.join(",", levels));
+        ToXContent.Params params = new ToXContent.MapParams(paramMap);
+
+        builder.startObject();
+        cacheStats.toXContent(builder, params);
+        builder.endObject();
+
+        String resultString = builder.toString();
+        return XContentHelper.convertToMap(MediaTypeRegistry.JSON.xContent(), resultString, true);
+    }
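+    // Example with hypothetical level names: getStatsXContentMap(cacheStats, List.of("indices", "shards"))
+    // serializes the stats with the "level" parameter set to "indices,shards" and parses the resulting JSON into a map.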
+
     private ICacheKey<String> getICacheKey(String key) {
         List<String> dims = new ArrayList<>();
         for (String dimName : dimensionNames) {
diff --git a/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java b/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java
index 1ad6083074025..35ca5d80aeb4e 100644
--- a/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java
+++ b/test/framework/src/main/java/org/opensearch/cluster/MockInternalClusterInfoService.java
@@ -124,7 +124,8 @@ List<NodeStats> adjustNodesStats(List<NodeStats> nodesStats) {
             nodeStats.getSearchPipelineStats(),
             nodeStats.getSegmentReplicationRejectionStats(),
             nodeStats.getRepositoriesStats(),
-            nodeStats.getAdmissionControlStats()
+            nodeStats.getAdmissionControlStats(),
+            nodeStats.getNodeCacheStats()
         );
     }).collect(Collectors.toList());
 }
diff --git a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java
index c2b964aa96212..ca80c65e58522 100644
--- a/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java
+++ b/test/framework/src/main/java/org/opensearch/test/InternalTestCluster.java
@@ -2736,6 +2736,7 @@ public void ensureEstimatedStats() {
                 false,
                 false,
                 false,
+                false,
                 false
             );
             assertThat(