diff --git a/docs/reference/indices/field-usage-stats.asciidoc b/docs/reference/indices/field-usage-stats.asciidoc
new file mode 100644
index 0000000000000..599f82aabbfd9
--- /dev/null
+++ b/docs/reference/indices/field-usage-stats.asciidoc
@@ -0,0 +1,167 @@
+[[field-usage-stats]]
+=== Field usage stats API
+++++
+Field usage stats
+++++
+
+experimental[]
+
+Returns field usage information for each shard and field
+of an index.
+Field usage statistics are automatically captured when
+queries are run on a cluster. A shard-level search request
+that accesses a given field, even if it accesses that field
+multiple times during the request, is counted as a single use.
+
+[source,console]
+--------------------------------------------------
+GET /my-index-000001/_field_usage_stats
+--------------------------------------------------
+// TEST[setup:messages]
+
+[[field-usage-stats-api-request]]
+==== {api-request-title}
+
+`GET /<index>/_field_usage_stats`
+
+[[field-usage-stats-api-request-prereqs]]
+==== {api-prereq-title}
+
+* If the {es} {security-features} are enabled, you must have the `manage`
+<<privileges-list-indices,index privilege>> for the target index or index alias.
+
+[[field-usage-stats-api-path-params]]
+==== {api-path-parms-title}
+
+include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index]
+
+[[field-usage-stats-api-query-params]]
+==== {api-query-parms-title}
+
+include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=allow-no-indices]
+
+include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=expand-wildcards]
+
+include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=index-ignore-unavailable]
+
+include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=wait_for_active_shards]
+
+include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=timeoutparms]
+
+`fields`::
++
+--
+(Optional, string)
+Comma-separated list of fields or wildcard patterns
+to include in the statistics.
+--
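+
+For example, a request like the following would limit the statistics to
+fields matching `message*`; any comma-separated list of fields or wildcard
+patterns can be passed in the `fields` query parameter:
+
+[source,console]
+--------------------------------------------------
+GET /my-index-000001/_field_usage_stats?fields=message*
+--------------------------------------------------
+// TEST[setup:messages]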
+
+[[field-usage-stats-api-example]]
+==== {api-examples-title}
+
+//////////////////////////
+
+[source,console]
+--------------------------------------------------
+POST /my-index-000001/_search
+{
+ "query" : {
+ "match" : { "context" : "bar" }
+ },
+ "aggs": {
+ "message_stats": {
+ "string_stats": {
+ "field": "message.keyword",
+ "show_distribution": true
+ }
+ }
+ }
+}
+--------------------------------------------------
+// TEST[setup:messages]
+
+//////////////////////////
+
+The following request retrieves field usage information for index `my-index-000001`
+on the currently available shards.
+
+[source,console]
+--------------------------------------------------
+GET /my-index-000001/_field_usage_stats
+--------------------------------------------------
+// TEST[continued]
+
+The API returns the following response:
+
+[source,console-response]
+--------------------------------------------------
+{
+ "_shards": {
+ "total": 1,
+ "successful": 1,
+ "failed": 0
+ },
+ "my-index-000001": {
+ "shards": [
+ {
+ "tracking_id": "MpOl0QlTQ4SYYhEe6KgJoQ",
+ "tracking_started_at_millis": 1625558985010,
+ "routing": {
+ "state": "STARTED",
+ "primary": true,
+ "node": "gA6KeeVzQkGURFCUyV-e8Q",
+ "relocating_node": null
+ },
+ "stats" : {
+ "all_fields": {
+            "any": 6, <1>
+ "inverted_index": {
+ "terms" : 1,
+ "postings" : 1,
+ "proximity" : 1, <2>
+ "positions" : 0,
+ "term_frequencies" : 1,
+ "offsets" : 0,
+ "payloads" : 0
+ },
+ "stored_fields" : 2,
+ "doc_values" : 1,
+ "points" : 0,
+ "norms" : 1,
+ "term_vectors" : 0
+ },
+ "fields": {
+ "_id": {
+ "any" : 1,
+ "inverted_index": {
+ "terms" : 1,
+ "postings" : 1,
+ "proximity" : 1,
+ "positions" : 0,
+ "term_frequencies" : 1,
+ "offsets" : 0,
+ "payloads" : 0
+ },
+ "stored_fields" : 1,
+ "doc_values" : 0,
+ "points" : 0,
+ "norms" : 0,
+ "term_vectors" : 0
+ },
+ "_source": {...},
+ "context": {...},
+ "message.keyword": {...}
+ }
+ }
+ }
+ ]
+ }
+}
+--------------------------------------------------
+// TESTRESPONSE[s/: \{\.\.\.\}/: $body.$_path/]
+// TESTRESPONSE[s/: (\-)?[0-9]+/: $body.$_path/]
+// TESTRESPONSE[s/: "[^"]*"/: $body.$_path/]
+<1> denotes any kind of use of the field, whether in the inverted index,
+    stored fields, doc values, etc.
+<2> denotes any kind of use of positions, offsets or
+    payloads.
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.field_usage_stats.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.field_usage_stats.json
new file mode 100644
index 0000000000000..90eb729da200d
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.field_usage_stats.json
@@ -0,0 +1,57 @@
+{
+ "indices.field_usage_stats": {
+ "documentation": {
+ "url": "https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-field-usage-stats.html",
+ "description": "Returns the field usage stats for each field of an index"
+ },
+ "stability": "experimental",
+ "visibility": "public",
+ "headers": {
+ "accept": [
+ "application/json"
+ ]
+ },
+ "url": {
+ "paths": [
+ {
+ "path": "/{index}/_field_usage_stats",
+ "methods": [
+ "GET"
+ ],
+ "parts": {
+ "index": {
+ "type": "string",
+ "description": "A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices"
+ }
+ }
+ }
+ ]
+ },
+ "params": {
+ "fields":{
+ "type":"list",
+ "description":"A comma-separated list of fields to include in the stats if only a subset of fields should be returned (supports wildcards)"
+ },
+ "ignore_unavailable": {
+ "type": "boolean",
+ "description": "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
+ },
+ "allow_no_indices": {
+ "type": "boolean",
+ "description": "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
+ },
+ "expand_wildcards": {
+ "type": "enum",
+ "options": [
+ "open",
+ "closed",
+ "hidden",
+ "none",
+ "all"
+ ],
+ "default": "open",
+ "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both."
+ }
+ }
+ }
+}
diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.stats/60_field_usage.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.stats/60_field_usage.yml
new file mode 100644
index 0000000000000..9b071b1caf87f
--- /dev/null
+++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/indices.stats/60_field_usage.yml
@@ -0,0 +1,217 @@
+---
+setup:
+ - skip:
+ version: " - 7.14.99"
+ reason: field usage stats API is introduced in 7.15
+
+---
+"Field usage stats":
+ - do:
+ indices.create:
+ index: testindex
+ body:
+ settings:
+ routing.rebalance.enable: none
+ index.number_of_shards: 1
+ index.number_of_replicas: 0
+ mappings:
+ properties:
+ name:
+ type: text
+ "index_options": "offsets"
+ "term_vector" : "with_positions_offsets"
+ price:
+ type: double
+
+ - do:
+ index:
+ index: testindex
+ body: { "name": "foo", "price": 100, "day" : "2003/09/06" }
+
+ - do:
+ index:
+ index: testindex
+ body: { "name": "bar", "price": 120, "day" : "2003/09/07" }
+
+ - do:
+ index:
+ index: testindex
+ body: { "name": "baz", "price": 100, "day" : "2003/09/13" }
+
+ - do:
+ index:
+ index: testindex
+ body: { "name": "bar & baz", "price": 220 }
+ - do:
+ index:
+ index: testindex
+ id: testid
+ body: { "name": "foo bar", "price": 150, "day" : "2003/09/07" }
+
+ - do:
+ indices.refresh: {}
+
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ body:
+ query:
+ bool:
+ must:
+ - match_phrase:
+ name: "foo bar"
+ - range:
+ day:
+ gte: "2003/09/07"
+ sort: [ "price" ]
+
+ - do:
+ indices.field_usage_stats: { index: testindex }
+
+ - is_true: testindex
+ - length: { testindex.shards: 1 }
+ - is_true: testindex.shards.0.tracking_id
+ - gt: { testindex.shards.0.tracking_started_at_millis: 0 }
+ - is_true: testindex.shards.0.routing
+ - is_true: testindex.shards.0.routing.state
+ - is_true: testindex.shards.0.routing.primary
+ - is_true: testindex.shards.0.routing.node
+ - is_true: testindex.shards.0.stats
+
+ # all_fields
+ - set: { testindex.shards.0.stats.all_fields: stat }
+ - gt: { $stat.any: 0 }
+ - gt: { $stat.inverted_index.terms: 0 }
+ - gt: { $stat.inverted_index.postings: 0 }
+ - gt: { $stat.inverted_index.proximity: 0 }
+ - gt: { $stat.stored_fields: 0 }
+ - gt: { $stat.doc_values: 0 }
+ - gt: { $stat.points: 0 }
+ - match: { $stat.norms: 0 }
+ - match: { $stat.term_vectors: 0 }
+ - gt: { $stat.inverted_index.term_frequencies: 0 }
+ - gt: { $stat.inverted_index.positions: 0 }
+ - match: { $stat.inverted_index.offsets: 0 }
+ - match: { $stat.inverted_index.payloads: 0 }
+
+ # name
+ - set: { testindex.shards.0.stats.fields.name: stat }
+ - gt: { $stat.any: 0 }
+ - gt: { $stat.inverted_index.terms: 0 }
+ - gt: { $stat.inverted_index.postings: 0 }
+ - gt: { $stat.inverted_index.proximity: 0 }
+ - match: { $stat.stored_fields: 0 }
+ - match: { $stat.doc_values: 0 }
+ - match: { $stat.points: 0 }
+ - match: { $stat.norms: 0 }
+ - match: { $stat.term_vectors: 0 }
+ - gt: { $stat.inverted_index.term_frequencies: 0 }
+ - gt: { $stat.inverted_index.positions: 0 }
+ - match: { $stat.inverted_index.offsets: 0 }
+ - match: { $stat.inverted_index.payloads: 0 }
+
+ # price
+ - set: { testindex.shards.0.stats.fields.price: stat }
+ - gt: { $stat.any: 0 }
+ - match: { $stat.inverted_index.terms: 0 }
+ - match: { $stat.inverted_index.postings: 0 }
+ - match: { $stat.inverted_index.proximity: 0 }
+ - match: { $stat.stored_fields: 0 }
+ - gt: { $stat.doc_values: 0 }
+ - match: { $stat.points: 0 }
+ - match: { $stat.norms: 0 }
+ - match: { $stat.term_vectors: 0 }
+ - match: { $stat.inverted_index.term_frequencies: 0 }
+ - match: { $stat.inverted_index.positions: 0 }
+ - match: { $stat.inverted_index.offsets: 0 }
+ - match: { $stat.inverted_index.payloads: 0 }
+
+ # day
+ - set: { testindex.shards.0.stats.fields.day: stat }
+ - gt: { $stat.any: 0 }
+ - match: { $stat.inverted_index.terms: 0 }
+ - match: { $stat.inverted_index.postings: 0 }
+ - match: { $stat.inverted_index.proximity: 0 }
+ - match: { $stat.stored_fields: 0 }
+ - gt: { $stat.doc_values: 0 }
+ - gt: { $stat.points: 0 }
+ - match: { $stat.norms: 0 }
+ - match: { $stat.term_vectors: 0 }
+ - match: { $stat.inverted_index.term_frequencies: 0 }
+ - match: { $stat.inverted_index.positions: 0 }
+ - match: { $stat.inverted_index.offsets: 0 }
+ - match: { $stat.inverted_index.payloads: 0 }
+
+ # _source
+ - set: { testindex.shards.0.stats.fields._source: stat }
+ - gt: { $stat.any: 0 }
+ - match: { $stat.inverted_index.terms: 0 }
+ - match: { $stat.inverted_index.postings: 0 }
+ - match: { $stat.inverted_index.proximity: 0 }
+ - gt: { $stat.stored_fields: 0 }
+ - match: { $stat.doc_values: 0 }
+ - match: { $stat.points: 0 }
+ - match: { $stat.norms: 0 }
+ - match: { $stat.term_vectors: 0 }
+ - match: { $stat.inverted_index.term_frequencies: 0 }
+ - match: { $stat.inverted_index.positions: 0 }
+ - match: { $stat.inverted_index.offsets: 0 }
+ - match: { $stat.inverted_index.payloads: 0 }
+
+ # _id
+ - set: { testindex.shards.0.stats.fields._id: stat }
+ - gt: { $stat.any: 0 }
+ - match: { $stat.inverted_index.terms: 0 }
+ - match: { $stat.inverted_index.postings: 0 }
+ - match: { $stat.inverted_index.proximity: 0 }
+ - gt: { $stat.stored_fields: 0 }
+ - match: { $stat.doc_values: 0 }
+ - match: { $stat.points: 0 }
+ - match: { $stat.norms: 0 }
+ - match: { $stat.term_vectors: 0 }
+ - match: { $stat.inverted_index.term_frequencies: 0 }
+ - match: { $stat.inverted_index.positions: 0 }
+ - match: { $stat.inverted_index.offsets: 0 }
+ - match: { $stat.inverted_index.payloads: 0 }
+
+ - do:
+ termvectors:
+ index: testindex
+ id: testid
+ term_statistics : true
+ fields: name
+
+ - do:
+ indices.field_usage_stats: { index: testindex }
+
+ # name
+ - set: { testindex.shards.0.stats.fields.name: stat }
+ - gt: { $stat.term_vectors: 0 }
+
+ - do:
+ search:
+ rest_total_hits_as_int: true
+ body:
+ query:
+ match_phrase:
+ name: "foo bar"
+
+ - do:
+ indices.field_usage_stats: { index: testindex }
+
+ # name
+ - set: { testindex.shards.0.stats.fields.name: stat }
+ - gt: { $stat.norms: 0 }
+
+ - do:
+ search:
+ body: {
+ "query" : { "match_phrase" : { "name" : "foo bar" } },
+ "highlight" : { "type" : "unified", "fields" : { "*" : {} } } }
+
+ - do:
+ indices.field_usage_stats: { index: testindex }
+
+ # name
+ - set: { testindex.shards.0.stats.fields.name: stat }
+ - gt: { $stat.inverted_index.offsets: 0 }
diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java
new file mode 100644
index 0000000000000..68010f7ebb911
--- /dev/null
+++ b/server/src/internalClusterTest/java/org/elasticsearch/search/stats/FieldUsageStatsIT.java
@@ -0,0 +1,153 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.search.stats;
+
+import org.elasticsearch.action.admin.indices.stats.FieldUsageShardResponse;
+import org.elasticsearch.action.admin.indices.stats.FieldUsageStatsAction;
+import org.elasticsearch.action.admin.indices.stats.FieldUsageStatsRequest;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.action.search.SearchType;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.set.Sets;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.index.search.stats.FieldUsageStats;
+import org.elasticsearch.index.search.stats.FieldUsageStats.UsageContext;
+import org.elasticsearch.search.aggregations.AggregationBuilders;
+import org.elasticsearch.test.ESIntegTestCase;
+
+import java.time.LocalDate;
+import java.time.format.DateTimeFormatter;
+import java.util.Collections;
+import java.util.List;
+import java.util.Locale;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+
+public class FieldUsageStatsIT extends ESIntegTestCase {
+
+ @Override
+ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
+ return Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings))
+ .put("search.aggs.rewrite_to_filter_by_filter", false)
+ .build();
+ }
+
+    private FieldUsageStats aggregated(List<FieldUsageShardResponse> stats) {
+ assertFalse(stats.isEmpty());
+ return stats.stream().map(FieldUsageShardResponse::getStats).reduce(FieldUsageStats::add).get();
+ }
+
+ public void testFieldUsageStats() throws ExecutionException, InterruptedException {
+ internalCluster().ensureAtLeastNumDataNodes(2);
+ int numShards = randomIntBetween(1, 2);
+ assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder()
+ .put(SETTING_NUMBER_OF_SHARDS, numShards)
+ .put(SETTING_NUMBER_OF_REPLICAS, 1)));
+
+ DateTimeFormatter formatter = DateTimeFormatter.ofPattern("yyyy/MM/dd", Locale.ROOT);
+ LocalDate date = LocalDate.of(2015, 9, 1);
+
+ for (int i = 0; i < 30; i++) {
+ client().prepareIndex("test", "_doc").setId(Integer.toString(i)).setSource(
+ "field", "value", "field2", "value2", "date_field", formatter.format(date.plusDays(i))).get();
+ }
+ client().admin().indices().prepareRefresh("test").get();
+
+ ensureGreen("test");
+
+ FieldUsageStats stats =
+ aggregated(client().execute(FieldUsageStatsAction.INSTANCE, new FieldUsageStatsRequest()).get().getStats().get("test"));
+
+ assertFalse(stats.hasField("field"));
+ assertFalse(stats.hasField("field.keyword"));
+ assertFalse(stats.hasField("field2"));
+ assertFalse(stats.hasField("date_field"));
+
+ SearchResponse searchResponse = client().prepareSearch()
+ .setSearchType(SearchType.DEFAULT)
+ .setQuery(QueryBuilders.termQuery("field", "value"))
+ .addAggregation(AggregationBuilders.terms("agg1").field("field.keyword"))
+ .addAggregation(AggregationBuilders.filter("agg2", QueryBuilders.spanTermQuery("field2", "value2")))
+ .setSize(between(5, 100))
+ .setPreference("fixed")
+ .get();
+
+ assertHitCount(searchResponse, 30);
+ assertAllSuccessful(searchResponse);
+
+ stats = aggregated(client().execute(FieldUsageStatsAction.INSTANCE, new FieldUsageStatsRequest()).get().getStats().get("test"));
+ logger.info("Stats after first query: {}", stats);
+
+ assertTrue(stats.hasField("_id"));
+ assertEquals(Collections.singleton(UsageContext.STORED_FIELDS), stats.get("_id").keySet());
+ assertTrue(stats.hasField("_source"));
+ assertEquals(Collections.singleton(UsageContext.STORED_FIELDS), stats.get("_source").keySet());
+
+ assertTrue(stats.hasField("field"));
+ // we sort by _score
+ assertEquals(Sets.newHashSet(UsageContext.TERMS, UsageContext.POSTINGS, UsageContext.FREQS, UsageContext.NORMS),
+ stats.get("field").keySet());
+ assertEquals(1L * numShards, stats.get("field").getTerms());
+
+ assertTrue(stats.hasField("field2"));
+ // positions because of span query
+ assertEquals(Sets.newHashSet(UsageContext.TERMS, UsageContext.POSTINGS, UsageContext.FREQS, UsageContext.POSITIONS),
+ stats.get("field2").keySet());
+ assertEquals(1L * numShards, stats.get("field2").getTerms());
+
+ assertTrue(stats.hasField("field.keyword"));
+ // terms agg does not use search as we've set search.aggs.rewrite_to_filter_by_filter to false
+ assertEquals(Sets.newHashSet(UsageContext.DOC_VALUES), stats.get("field.keyword").keySet());
+ assertEquals(1L * numShards, stats.get("field.keyword").getDocValues());
+
+ client().prepareSearch()
+ .setSearchType(SearchType.DEFAULT)
+ .setQuery(QueryBuilders.termQuery("field", "value"))
+ .addAggregation(AggregationBuilders.terms("agg1").field("field.keyword"))
+ .setSize(0)
+ .setPreference("fixed")
+ .get();
+
+ stats = aggregated(client().execute(FieldUsageStatsAction.INSTANCE, new FieldUsageStatsRequest()).get().getStats().get("test"));
+ logger.info("Stats after second query: {}", stats);
+
+ assertEquals(2L * numShards, stats.get("field").getTerms());
+ assertEquals(1L * numShards, stats.get("field2").getTerms());
+ assertEquals(2L * numShards, stats.get("field.keyword").getDocValues());
+
+ assertFalse(stats.hasField("date_field"));
+
+ // show that we also track stats in can_match
+ assertEquals(2L * numShards, client().admin().indices().prepareStats("test").clear().setSearch(true).get()
+ .getIndex("test").getTotal().getSearch().getTotal().getQueryCount());
+ client().prepareSearch()
+ .setSearchType(SearchType.DEFAULT)
+ .setPreFilterShardSize(1)
+ .setQuery(QueryBuilders.rangeQuery("date_field").from("2016/01/01"))
+ .setSize(100)
+ .setPreference("fixed")
+ .get();
+
+ stats = aggregated(client().execute(FieldUsageStatsAction.INSTANCE, new FieldUsageStatsRequest()).get().getStats().get("test"));
+ logger.info("Stats after third query: {}", stats);
+
+ assertTrue(stats.hasField("date_field"));
+ assertEquals(Sets.newHashSet(UsageContext.POINTS), stats.get("date_field").keySet());
+ // can_match does not enter search stats
+ // there is a special case though where we have no hit but we need to get at least one search response in order
+ // to produce a valid search result with all the aggs etc., so we hit one of the two shards
+ assertEquals((2 * numShards) + 1, client().admin().indices().prepareStats("test").clear().setSearch(true).get()
+ .getIndex("test").getTotal().getSearch().getTotal().getQueryCount());
+ }
+}
diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java
index c3ab89245dedb..d2e19cdd42858 100644
--- a/server/src/main/java/org/elasticsearch/action/ActionModule.java
+++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java
@@ -111,6 +111,8 @@
import org.elasticsearch.action.admin.indices.dangling.list.TransportListDanglingIndicesAction;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction;
import org.elasticsearch.action.admin.indices.delete.TransportDeleteIndexAction;
+import org.elasticsearch.action.admin.indices.diskusage.AnalyzeIndexDiskUsageAction;
+import org.elasticsearch.action.admin.indices.diskusage.TransportAnalyzeIndexDiskUsageAction;
import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsAction;
import org.elasticsearch.action.admin.indices.exists.indices.TransportIndicesExistsAction;
import org.elasticsearch.action.admin.indices.exists.types.TransportTypesExistsAction;
@@ -154,9 +156,9 @@
import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction;
import org.elasticsearch.action.admin.indices.shrink.ResizeAction;
import org.elasticsearch.action.admin.indices.shrink.TransportResizeAction;
-import org.elasticsearch.action.admin.indices.diskusage.AnalyzeIndexDiskUsageAction;
+import org.elasticsearch.action.admin.indices.stats.FieldUsageStatsAction;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction;
-import org.elasticsearch.action.admin.indices.diskusage.TransportAnalyzeIndexDiskUsageAction;
+import org.elasticsearch.action.admin.indices.stats.TransportFieldUsageAction;
import org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction;
import org.elasticsearch.action.admin.indices.template.delete.DeleteComponentTemplateAction;
import org.elasticsearch.action.admin.indices.template.delete.DeleteComposableIndexTemplateAction;
@@ -318,6 +320,7 @@
import org.elasticsearch.rest.action.admin.indices.RestDeleteComposableIndexTemplateAction;
import org.elasticsearch.rest.action.admin.indices.RestDeleteIndexAction;
import org.elasticsearch.rest.action.admin.indices.RestDeleteIndexTemplateAction;
+import org.elasticsearch.rest.action.admin.indices.RestFieldUsageStatsAction;
import org.elasticsearch.rest.action.admin.indices.RestFlushAction;
import org.elasticsearch.rest.action.admin.indices.RestForceMergeAction;
import org.elasticsearch.rest.action.admin.indices.RestGetAliasesAction;
@@ -609,6 +612,7 @@ public void reg
actions.register(AutoCreateAction.INSTANCE, AutoCreateAction.TransportAction.class);
actions.register(ResolveIndexAction.INSTANCE, ResolveIndexAction.TransportAction.class);
actions.register(AnalyzeIndexDiskUsageAction.INSTANCE, TransportAnalyzeIndexDiskUsageAction.class);
+ actions.register(FieldUsageStatsAction.INSTANCE, TransportFieldUsageAction.class);
//Indexed scripts
actions.register(PutStoredScriptAction.INSTANCE, TransportPutStoredScriptAction.class);
@@ -813,6 +817,7 @@ public void initRestHandlers(Supplier<DiscoveryNodes> nodesInCluster) {
registerHandler.accept(new RestSnapshotAction());
registerHandler.accept(new RestTemplatesAction());
registerHandler.accept(new RestAnalyzeIndexDiskUsageAction());
+ registerHandler.accept(new RestFieldUsageStatsAction());
for (ActionPlugin plugin : actionPlugins) {
for (RestHandler handler : plugin.getRestHandlers(settings, restController, clusterSettings, indexScopedSettings,
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java
index 1046182d43b49..676e0bf99eb4a 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java
@@ -9,7 +9,6 @@
package org.elasticsearch.action.admin.indices.stats;
import org.apache.lucene.store.AlreadyClosedException;
-import org.elasticsearch.core.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
@@ -17,6 +16,7 @@
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.core.Nullable;
import org.elasticsearch.index.cache.query.QueryCacheStats;
import org.elasticsearch.index.cache.request.RequestCacheStats;
import org.elasticsearch.index.engine.SegmentsStats;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageShardRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageShardRequest.java
new file mode 100644
index 0000000000000..4f17d065e62a4
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageShardRequest.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.action.admin.indices.stats;
+
+import org.elasticsearch.action.support.broadcast.BroadcastShardRequest;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.tasks.CancellableTask;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.tasks.TaskId;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Map;
+
+public class FieldUsageShardRequest extends BroadcastShardRequest {
+
+ private final String[] fields;
+
+ FieldUsageShardRequest(ShardId shardId, FieldUsageStatsRequest request) {
+ super(shardId, request);
+ this.fields = request.fields();
+ }
+
+
+ FieldUsageShardRequest(StreamInput in) throws IOException {
+ super(in);
+ this.fields = in.readStringArray();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeStringArray(fields);
+ }
+
+ @Override
+    public Task createTask(long id, String type, String action, TaskId parentTaskId, Map<String, String> headers) {
+ return new CancellableTask(id, type, action, "", parentTaskId, headers) {
+ @Override
+ public String getDescription() {
+ return FieldUsageShardRequest.this.getDescription();
+ }
+ };
+ }
+
+ @Override
+ public String getDescription() {
+ return "get field usage for shard: [" + shardId() + "], fields: " + Arrays.toString(fields);
+ }
+
+ public String[] fields() {
+ return fields;
+ }
+}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageShardResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageShardResponse.java
new file mode 100644
index 0000000000000..b33b4ab369c8a
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageShardResponse.java
@@ -0,0 +1,94 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.action.admin.indices.stats;
+
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.index.search.stats.FieldUsageStats;
+
+import java.io.IOException;
+import java.util.Objects;
+
+public class FieldUsageShardResponse implements Writeable, ToXContentObject {
+
+ final String trackingId;
+ final ShardRouting shardRouting;
+ final long trackingStartTime;
+ final FieldUsageStats stats;
+
+ FieldUsageShardResponse(StreamInput in) throws IOException {
+ trackingId = in.readString();
+ shardRouting = new ShardRouting(in);
+ trackingStartTime = in.readVLong();
+ stats = new FieldUsageStats(in);
+ }
+
+ FieldUsageShardResponse(String trackingId, ShardRouting shardRouting, long trackingStartTime, FieldUsageStats stats) {
+ this.trackingId = Objects.requireNonNull(trackingId, "trackingId must be non null");
+ this.shardRouting = Objects.requireNonNull(shardRouting, "routing must be non null");
+ this.trackingStartTime = trackingStartTime;
+ this.stats = Objects.requireNonNull(stats, "stats must be non null");
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(trackingId);
+ shardRouting.writeTo(out);
+ out.writeVLong(trackingStartTime);
+ stats.writeTo(out);
+ }
+
+ public String getTrackingId() {
+ return trackingId;
+ }
+
+ public ShardRouting getShardRouting() {
+ return shardRouting;
+ }
+
+ public long getTrackingStartTime() {
+ return trackingStartTime;
+ }
+
+ public FieldUsageStats getStats() {
+ return stats;
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ builder.field(Fields.TRACKING_ID, trackingId);
+ builder.timeField(Fields.TRACKING_STARTED_AT_MILLIS, Fields.TRACKING_STARTED_AT, trackingStartTime);
+ builder.startObject(Fields.ROUTING)
+ .field(Fields.STATE, shardRouting.state())
+ .field(Fields.PRIMARY, shardRouting.primary())
+ .field(Fields.NODE, shardRouting.currentNodeId())
+ .field(Fields.RELOCATING_NODE, shardRouting.relocatingNodeId())
+ .endObject();
+ builder.field(Fields.STATS, stats, params);
+ builder.endObject();
+ return builder;
+ }
+
+ static final class Fields {
+ static final String TRACKING_ID = "tracking_id";
+ static final String TRACKING_STARTED_AT_MILLIS = "tracking_started_at_millis";
+ static final String TRACKING_STARTED_AT = "tracking_started_at";
+ static final String STATS = "stats";
+ static final String ROUTING = "routing";
+ static final String STATE = "state";
+ static final String PRIMARY = "primary";
+ static final String NODE = "node";
+ static final String RELOCATING_NODE = "relocating_node";
+ }
+}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageStatsAction.java
new file mode 100644
index 0000000000000..825b66f63d812
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageStatsAction.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.action.admin.indices.stats;
+
+import org.elasticsearch.action.ActionType;
+
+public class FieldUsageStatsAction extends ActionType<FieldUsageStatsResponse> {
+
+ public static final FieldUsageStatsAction INSTANCE = new FieldUsageStatsAction();
+ public static final String NAME = "indices:monitor/field_usage_stats";
+
+ private FieldUsageStatsAction() {
+ super(NAME, FieldUsageStatsResponse::new);
+ }
+}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageStatsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageStatsRequest.java
new file mode 100644
index 0000000000000..795241fe59b21
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageStatsRequest.java
@@ -0,0 +1,71 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.action.admin.indices.stats;
+
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.action.support.broadcast.BroadcastRequest;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.tasks.CancellableTask;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.tasks.TaskId;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Map;
+
+public class FieldUsageStatsRequest extends BroadcastRequest<FieldUsageStatsRequest> {
+
+ private String[] fields = Strings.EMPTY_ARRAY;
+
+ public FieldUsageStatsRequest(String... indices) {
+ super(indices);
+ }
+
+ public FieldUsageStatsRequest(String[] indices, IndicesOptions indicesOptions) {
+ super(indices, indicesOptions);
+ }
+
+ public FieldUsageStatsRequest(StreamInput in) throws IOException {
+ super(in);
+ fields = in.readStringArray();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeStringArray(fields);
+ }
+
+ public FieldUsageStatsRequest fields(String... fields) {
+ this.fields = fields;
+ return this;
+ }
+
+ public String[] fields() {
+ return this.fields;
+ }
+
+ @Override
+    public Task createTask(long id, String type, String action, TaskId parentTaskId, Map<String, String> headers) {
+        return new CancellableTask(id, type, action, "", parentTaskId, headers) {
+ @Override
+ public String getDescription() {
+ return FieldUsageStatsRequest.this.getDescription();
+ }
+ };
+ }
+
+ @Override
+ public String getDescription() {
+ return "get field usage for indices [" + String.join(",", indices) + "], fields " + Arrays.toString(fields);
+ }
+
+}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageStatsResponse.java
new file mode 100644
index 0000000000000..730598e347d4c
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageStatsResponse.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.action.admin.indices.stats;
+
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.BroadcastResponse;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+public class FieldUsageStatsResponse extends BroadcastResponse {
+    private final Map<String, List<FieldUsageShardResponse>> stats;
+
+    FieldUsageStatsResponse(int totalShards, int successfulShards, int failedShards,
+                            List<DefaultShardOperationFailedException> shardFailures,
+                            Map<String, List<FieldUsageShardResponse>> stats) {
+ super(totalShards, successfulShards, failedShards, shardFailures);
+ this.stats = stats;
+ }
+
+ FieldUsageStatsResponse(StreamInput in) throws IOException {
+ super(in);
+ stats = in.readMap(StreamInput::readString, i -> i.readList(FieldUsageShardResponse::new));
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ super.writeTo(out);
+ out.writeMap(stats, StreamOutput::writeString, StreamOutput::writeList);
+ }
+
+    public Map<String, List<FieldUsageShardResponse>> getStats() {
+ return stats;
+ }
+
+ @Override
+ protected void addCustomXContentFields(XContentBuilder builder, Params params) throws IOException {
+        final List<Map.Entry<String, List<FieldUsageShardResponse>>> sortedEntries =
+            stats.entrySet().stream().sorted(Map.Entry.comparingByKey()).collect(Collectors.toList());
+        for (Map.Entry<String, List<FieldUsageShardResponse>> entry : sortedEntries) {
+ builder.startObject(entry.getKey());
+ builder.startArray("shards");
+ for (FieldUsageShardResponse resp : entry.getValue()) {
+ resp.toXContent(builder, params);
+ }
+ builder.endArray();
+ builder.endObject();
+ }
+ }
+}
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportFieldUsageAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportFieldUsageAction.java
new file mode 100644
index 0000000000000..d15ea4a95577e
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportFieldUsageAction.java
@@ -0,0 +1,105 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.action.admin.indices.stats;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.block.ClusterBlockException;
+import org.elasticsearch.cluster.block.ClusterBlockLevel;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardsIterator;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.tasks.Task;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class TransportFieldUsageAction extends TransportBroadcastByNodeAction<FieldUsageStatsRequest, FieldUsageStatsResponse,
+    FieldUsageShardResponse> {
+
+ private final IndicesService indicesService;
+
+ @Inject
+ public TransportFieldUsageAction(ClusterService clusterService,
+ TransportService transportService,
+ IndicesService indexServices, ActionFilters actionFilters,
+ IndexNameExpressionResolver indexNameExpressionResolver) {
+ super(FieldUsageStatsAction.NAME, clusterService, transportService, actionFilters, indexNameExpressionResolver,
+ FieldUsageStatsRequest::new, ThreadPool.Names.SAME);
+ this.indicesService = indexServices;
+ }
+
+ @Override
+ protected FieldUsageShardResponse readShardResult(StreamInput in) throws IOException {
+ return new FieldUsageShardResponse(in);
+ }
+
+ @Override
+    protected FieldUsageStatsResponse newResponse(FieldUsageStatsRequest request, int totalShards, int successfulShards, int failedShards,
+                                                  List<FieldUsageShardResponse> fieldUsages,
+                                                  List<DefaultShardOperationFailedException> shardFailures,
+                                                  ClusterState clusterState) {
+        final Map<String, List<FieldUsageShardResponse>> combined = new HashMap<>();
+ for (FieldUsageShardResponse response : fieldUsages) {
+ combined.computeIfAbsent(response.shardRouting.shardId().getIndexName(), i -> new ArrayList<>()).add(response);
+ }
+ return new FieldUsageStatsResponse(
+ totalShards,
+ successfulShards,
+ shardFailures.size(),
+ shardFailures,
+ combined);
+ }
+
+ @Override
+ protected FieldUsageStatsRequest readRequestFrom(StreamInput in) throws IOException {
+ return new FieldUsageStatsRequest(in);
+ }
+
+ @Override
+    protected void shardOperation(FieldUsageStatsRequest request, ShardRouting shardRouting, Task task,
+                                  ActionListener<FieldUsageShardResponse> listener) {
+ ActionListener.completeWith(listener, () -> {
+ final ShardId shardId = shardRouting.shardId();
+ final IndexShard shard = indicesService.indexServiceSafe(shardId.getIndex()).getShard(shardId.id());
+ return new FieldUsageShardResponse(shard.getShardUuid(), shardRouting,
+ shard.getShardCreationTime(), shard.fieldUsageStats(request.fields()));
+ });
+ }
+
+ @Override
+ protected ShardsIterator shards(ClusterState clusterState, FieldUsageStatsRequest request, String[] concreteIndices) {
+ return clusterState.routingTable().allActiveShards(concreteIndices);
+ }
+
+ @Override
+ protected ClusterBlockException checkGlobalBlock(ClusterState state, FieldUsageStatsRequest request) {
+ return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ);
+ }
+
+ @Override
+ protected ClusterBlockException checkRequestBlock(ClusterState state, FieldUsageStatsRequest request,
+ String[] concreteIndices) {
+ return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, concreteIndices);
+ }
+}
diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java
index 8beacdc3e654f..4a7f474f3b932 100644
--- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java
+++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java
@@ -256,6 +256,10 @@ public ShardsIterator allShards(String[] indices) {
return allShardsSatisfyingPredicate(indices, shardRouting -> true, false);
}
+ public ShardsIterator allActiveShards(String[] indices) {
+ return allShardsSatisfyingPredicate(indices, ShardRouting::active, false);
+ }
+
public ShardsIterator allShardsIncludingRelocationTargets(String[] indices) {
return allShardsSatisfyingPredicate(indices, shardRouting -> true, true);
}
diff --git a/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java b/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java
index 750dabf187bf1..d4efb5eacb1ab 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java
@@ -31,6 +31,7 @@
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.BitSet;
import org.apache.lucene.util.BitSetIterator;
+import org.elasticsearch.search.internal.FilterStoredFieldVisitor;
import java.io.IOException;
import java.util.Arrays;
@@ -257,47 +258,6 @@ public StoredFieldsReader clone() {
}
- private static class FilterStoredFieldVisitor extends StoredFieldVisitor {
- private final StoredFieldVisitor visitor;
-
- FilterStoredFieldVisitor(StoredFieldVisitor visitor) {
- this.visitor = visitor;
- }
-
- @Override
- public void binaryField(FieldInfo fieldInfo, byte[] value) throws IOException {
- visitor.binaryField(fieldInfo, value);
- }
-
- @Override
- public void stringField(FieldInfo fieldInfo, byte[] value) throws IOException {
- visitor.stringField(fieldInfo, value);
- }
-
- @Override
- public void intField(FieldInfo fieldInfo, int value) throws IOException {
- visitor.intField(fieldInfo, value);
- }
-
- @Override
- public void longField(FieldInfo fieldInfo, long value) throws IOException {
- visitor.longField(fieldInfo, value);
- }
-
- @Override
- public void floatField(FieldInfo fieldInfo, float value) throws IOException {
- visitor.floatField(fieldInfo, value);
- }
-
- @Override
- public void doubleField(FieldInfo fieldInfo, double value) throws IOException {
- visitor.doubleField(fieldInfo, value);
- }
-
- @Override
- public Status needsField(FieldInfo fieldInfo) throws IOException {
- return visitor.needsField(fieldInfo);
- }
- }
}
+
}
diff --git a/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java b/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java
index 00008ded01314..9946bb61a74ff 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/TranslogDirectoryReader.java
@@ -41,7 +41,7 @@
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.core.internal.io.IOUtils;
-import org.elasticsearch.index.fieldvisitor.FieldsVisitor;
+import org.elasticsearch.index.fieldvisitor.FieldNamesProvidingStoredFieldsVisitor;
import org.elasticsearch.index.mapper.DocumentParser;
import org.elasticsearch.index.mapper.IdFieldMapper;
import org.elasticsearch.index.mapper.MappingLookup;
@@ -319,9 +319,9 @@ public void document(int docID, StoredFieldVisitor visitor) throws IOException {
throw new IllegalArgumentException("no such doc ID " + docID);
}
if (delegate.get() == null) {
- if (visitor instanceof FieldsVisitor) {
+ if (visitor instanceof FieldNamesProvidingStoredFieldsVisitor) {
// override this for ShardGetService
- if (TRANSLOG_FIELD_NAMES.containsAll(((FieldsVisitor) visitor).getFieldNames())) {
+ if (TRANSLOG_FIELD_NAMES.containsAll(((FieldNamesProvidingStoredFieldsVisitor) visitor).getFieldNames())) {
readStoredFieldsDirectly(visitor);
return;
}
diff --git a/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldNamesProvidingStoredFieldsVisitor.java b/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldNamesProvidingStoredFieldsVisitor.java
new file mode 100644
index 0000000000000..fc3b07fd7d19e
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldNamesProvidingStoredFieldsVisitor.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.index.fieldvisitor;
+
+import org.apache.lucene.index.StoredFieldVisitor;
+
+import java.util.Set;
+
+/**
+ * Stored fields visitor which provides information about the field names that will be requested
+ */
+public abstract class FieldNamesProvidingStoredFieldsVisitor extends StoredFieldVisitor {
+    public abstract Set<String> getFieldNames();
+}
diff --git a/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java b/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java
index fc19d1e80592e..8b6958b5dcca6 100644
--- a/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java
+++ b/server/src/main/java/org/elasticsearch/index/fieldvisitor/FieldsVisitor.java
@@ -36,7 +36,7 @@
/**
* Base {@link StoredFieldVisitor} that retrieves all non-redundant metadata.
*/
-public class FieldsVisitor extends StoredFieldVisitor {
+public class FieldsVisitor extends FieldNamesProvidingStoredFieldsVisitor {
private static final Set<String> BASE_REQUIRED_FIELDS = unmodifiableSet(newHashSet(
IdFieldMapper.NAME,
RoutingFieldMapper.NAME));
@@ -77,6 +77,7 @@ public Status needsField(FieldInfo fieldInfo) {
: Status.NO;
}
+ @Override
public Set<String> getFieldNames() {
return requiredFields;
}
diff --git a/server/src/main/java/org/elasticsearch/index/search/stats/FieldUsageStats.java b/server/src/main/java/org/elasticsearch/index/search/stats/FieldUsageStats.java
new file mode 100644
index 0000000000000..966d06197731c
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/index/search/stats/FieldUsageStats.java
@@ -0,0 +1,325 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.index.search.stats;
+
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.xcontent.ToXContentFragment;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+public class FieldUsageStats implements ToXContentObject, Writeable {
+ public static final String ANY = "any";
+ public static final String INVERTED_INDEX = "inverted_index";
+ public static final String TERMS = "terms";
+ public static final String POSTINGS = "postings";
+ public static final String TERM_FREQUENCIES = "term_frequencies";
+ public static final String POSITIONS = "positions";
+ public static final String OFFSETS = "offsets";
+ public static final String DOC_VALUES = "doc_values";
+ public static final String STORED_FIELDS = "stored_fields";
+ public static final String NORMS = "norms";
+ public static final String PAYLOADS = "payloads";
+ public static final String TERM_VECTORS = "term_vectors"; // possibly refine this one
+ public static final String POINTS = "points";
+ public static final String PROXIMITY = "proximity";
+
+    private final Map<String, PerFieldUsageStats> stats;
+
+ public FieldUsageStats() {
+ this.stats = new HashMap<>();
+ }
+
+    public FieldUsageStats(Map<String, PerFieldUsageStats> stats) {
+ this.stats = new HashMap<>(stats);
+ }
+
+ public FieldUsageStats(StreamInput in) throws IOException {
+ stats = in.readMap(StreamInput::readString, PerFieldUsageStats::new);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeMap(stats, StreamOutput::writeString, (o, v) -> v.writeTo(o));
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.startObject();
+ builder.startObject("all_fields");
+ total().toXContent(builder, params);
+ builder.endObject();
+
+ builder.startObject("fields");
+ {
+            final List<Map.Entry<String, PerFieldUsageStats>> sortedEntries =
+                stats.entrySet().stream().sorted(Map.Entry.comparingByKey()).collect(Collectors.toList());
+            for (Map.Entry<String, PerFieldUsageStats> entry : sortedEntries) {
+ builder.startObject(entry.getKey());
+ entry.getValue().toXContent(builder, params);
+ builder.endObject();
+ }
+ }
+ builder.endObject();
+ builder.endObject();
+ return builder;
+ }
+
+ PerFieldUsageStats total() {
+ PerFieldUsageStats total = PerFieldUsageStats.EMPTY;
+ for (PerFieldUsageStats value : stats.values()) {
+ total = total.add(value);
+ }
+ return total;
+ }
+
+ @Override
+ public String toString() {
+ return Strings.toString(this);
+ }
+
+ public boolean hasField(String field) {
+ return stats.containsKey(field);
+ }
+
+ public PerFieldUsageStats get(String field) {
+ return stats.get(field);
+ }
+
+ public FieldUsageStats add(FieldUsageStats other) {
+ FieldUsageStats newStats = new FieldUsageStats(stats);
+ other.stats.forEach((k, v) -> newStats.stats.merge(k, v, PerFieldUsageStats::add));
+ return newStats;
+ }
+
+ public enum UsageContext {
+ DOC_VALUES,
+ STORED_FIELDS,
+ TERMS,
+ POSTINGS,
+ FREQS,
+ POSITIONS,
+ OFFSETS,
+ NORMS,
+ PAYLOADS,
+ TERM_VECTORS, // possibly refine this one
+ POINTS,
+ }
+
+ public static class PerFieldUsageStats implements ToXContentFragment, Writeable {
+
+ static final PerFieldUsageStats EMPTY = new PerFieldUsageStats(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+
+ private final long any;
+ private final long proximity;
+ private final long terms;
+ private final long postings;
+ private final long termFrequencies;
+ private final long positions;
+ private final long offsets;
+ private final long docValues;
+ private final long storedFields;
+ private final long norms;
+ private final long payloads;
+ private final long termVectors;
+ private final long points;
+
+ public PerFieldUsageStats(long any, long proximity, long terms, long postings, long termFrequencies, long positions, long offsets,
+ long docValues, long storedFields, long norms, long payloads, long termVectors, long points) {
+ this.any = any;
+ this.proximity = proximity;
+ this.terms = terms;
+ this.postings = postings;
+ this.termFrequencies = termFrequencies;
+ this.positions = positions;
+ this.offsets = offsets;
+ this.docValues = docValues;
+ this.storedFields = storedFields;
+ this.norms = norms;
+ this.payloads = payloads;
+ this.termVectors = termVectors;
+ this.points = points;
+ }
+
+ private PerFieldUsageStats add(PerFieldUsageStats other) {
+ return new PerFieldUsageStats(
+ any + other.any,
+ proximity + other.proximity,
+ terms + other.terms,
+ postings + other.postings,
+ termFrequencies + other.termFrequencies,
+ positions + other.positions,
+ offsets + other.offsets,
+ docValues + other.docValues,
+ storedFields + other.storedFields,
+ norms + other.norms,
+ payloads + other.payloads,
+ termVectors + other.termVectors,
+ points + other.points);
+ }
+
+ public PerFieldUsageStats(StreamInput in) throws IOException {
+ any = in.readVLong();
+ proximity = in.readVLong();
+ terms = in.readVLong();
+ postings = in.readVLong();
+ termFrequencies = in.readVLong();
+ positions = in.readVLong();
+ offsets = in.readVLong();
+ docValues = in.readVLong();
+ storedFields = in.readVLong();
+ norms = in.readVLong();
+ payloads = in.readVLong();
+ termVectors = in.readVLong();
+ points = in.readVLong();
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeVLong(any);
+ out.writeVLong(proximity);
+ out.writeVLong(terms);
+ out.writeVLong(postings);
+ out.writeVLong(termFrequencies);
+ out.writeVLong(positions);
+ out.writeVLong(offsets);
+ out.writeVLong(docValues);
+ out.writeVLong(storedFields);
+ out.writeVLong(norms);
+ out.writeVLong(payloads);
+ out.writeVLong(termVectors);
+ out.writeVLong(points);
+ }
+
+ @Override
+ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+ builder.field(ANY, any);
+ builder.startObject(INVERTED_INDEX);
+ builder.field(TERMS, terms);
+ builder.field(POSTINGS, postings);
+ builder.field(TERM_FREQUENCIES, termFrequencies);
+ builder.field(POSITIONS, positions);
+ builder.field(OFFSETS, offsets);
+ builder.field(PAYLOADS, payloads);
+ builder.field(PROXIMITY, proximity);
+ builder.endObject();
+ builder.field(STORED_FIELDS, storedFields);
+ builder.field(DOC_VALUES, docValues);
+ builder.field(POINTS, points);
+ builder.field(NORMS, norms);
+ builder.field(TERM_VECTORS, termVectors);
+ return builder;
+ }
+
+        public Set<UsageContext> keySet() {
+            final EnumSet<UsageContext> set = EnumSet.noneOf(UsageContext.class);
+ if (terms > 0L) {
+ set.add(UsageContext.TERMS);
+ }
+ if (postings > 0L) {
+ set.add(UsageContext.POSTINGS);
+ }
+ if (termFrequencies > 0L) {
+ set.add(UsageContext.FREQS);
+ }
+ if (positions > 0L) {
+ set.add(UsageContext.POSITIONS);
+ }
+ if (offsets > 0L) {
+ set.add(UsageContext.OFFSETS);
+ }
+ if (docValues > 0L) {
+ set.add(UsageContext.DOC_VALUES);
+ }
+ if (storedFields > 0L) {
+ set.add(UsageContext.STORED_FIELDS);
+ }
+ if (norms > 0L) {
+ set.add(UsageContext.NORMS);
+ }
+ if (payloads > 0L) {
+ set.add(UsageContext.PAYLOADS);
+ }
+ if (termVectors > 0L) {
+ set.add(UsageContext.TERM_VECTORS);
+ }
+ if (points > 0L) {
+ set.add(UsageContext.POINTS);
+ }
+ return set;
+ }
+
+ public long getTerms() {
+ return terms;
+ }
+
+ public long getPostings() {
+ return postings;
+ }
+
+ public long getTermFrequencies() {
+ return termFrequencies;
+ }
+
+ public long getPositions() {
+ return positions;
+ }
+
+ public long getOffsets() {
+ return offsets;
+ }
+
+ public long getDocValues() {
+ return docValues;
+ }
+
+ public long getStoredFields() {
+ return storedFields;
+ }
+
+ public long getNorms() {
+ return norms;
+ }
+
+ public long getPayloads() {
+ return payloads;
+ }
+
+ public long getTermVectors() {
+ return termVectors;
+ }
+
+ public long getPoints() {
+ return points;
+ }
+
+ public long getProximity() {
+ return proximity;
+ }
+
+ public long getAny() {
+ return any;
+ }
+
+ @Override
+ public String toString() {
+ return Strings.toString(this);
+ }
+ }
+}
diff --git a/server/src/main/java/org/elasticsearch/index/search/stats/ShardFieldUsageTracker.java b/server/src/main/java/org/elasticsearch/index/search/stats/ShardFieldUsageTracker.java
new file mode 100644
index 0000000000000..fc7d48db8f06f
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/index/search/stats/ShardFieldUsageTracker.java
@@ -0,0 +1,218 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.index.search.stats;
+
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.util.CollectionUtils;
+import org.elasticsearch.core.Releasable;
+import org.elasticsearch.index.search.stats.FieldUsageStats.PerFieldUsageStats;
+import org.elasticsearch.search.internal.FieldUsageTrackingDirectoryReader;
+import org.elasticsearch.search.internal.FieldUsageTrackingDirectoryReader.FieldUsageNotifier;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.LongAdder;
+
+/**
+ * Records and provides field usage stats
+ */
+public class ShardFieldUsageTracker {
+
+ private final Map<String, InternalFieldStats> perFieldStats = new ConcurrentHashMap<>();
+
+ /**
+ * Returns a new session which can be passed to a {@link FieldUsageTrackingDirectoryReader}
+ * to track field usage of a shard. Fields tracked as part of a session are only counted
+ * as a single use. The stats are then recorded for this shard when the corresponding
+ * session is closed.
+ */
+ public FieldUsageStatsTrackingSession createSession() {
+ return new FieldUsageStatsTrackingSession();
+ }
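+
+ // Illustrative sketch (not part of this change) of how a session is typically consumed,
+ // assuming a tracker and an already-open DirectoryReader named `reader`:
+ //
+ // ShardFieldUsageTracker tracker = new ShardFieldUsageTracker();
+ // try (FieldUsageStatsTrackingSession session = tracker.createSession()) {
+ // DirectoryReader tracked = new FieldUsageTrackingDirectoryReader(reader, session);
+ // // run the search against `tracked`; closing the session records the usage for this shard
+ // }
+ // FieldUsageStats stats = tracker.stats();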
+
+ /**
+ * Returns field usage stats for the given fields. If no subset of fields is specified,
+ * returns information for all fields.
+ */
+ public FieldUsageStats stats(String... fields) {
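+ // Field name patterns support simple wildcards; an empty or null array selects all tracked fields.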
+ final Map<String, PerFieldUsageStats> stats = new HashMap<>(perFieldStats.size());
+ for (Map.Entry<String, InternalFieldStats> entry : perFieldStats.entrySet()) {
+ InternalFieldStats ifs = entry.getValue();
+ if (CollectionUtils.isEmpty(fields) || Regex.simpleMatch(fields, entry.getKey())) {
+ PerFieldUsageStats pf = new PerFieldUsageStats(ifs.any.longValue(), ifs.proximity.longValue(), ifs.terms.longValue(),
+ ifs.postings.longValue(), ifs.termFrequencies.longValue(), ifs.positions.longValue(), ifs.offsets.longValue(),
+ ifs.docValues.longValue(), ifs.storedFields.longValue(), ifs.norms.longValue(), ifs.payloads.longValue(),
+ ifs.termVectors.longValue(), ifs.points.longValue());
+ stats.put(entry.getKey(), pf);
+ }
+ }
+ return new FieldUsageStats(Collections.unmodifiableMap(stats));
+ }
+
+ static class InternalFieldStats {
+ final LongAdder any = new LongAdder();
+ final LongAdder proximity = new LongAdder();
+ final LongAdder terms = new LongAdder();
+ final LongAdder postings = new LongAdder();
+ final LongAdder termFrequencies = new LongAdder();
+ final LongAdder positions = new LongAdder();
+ final LongAdder offsets = new LongAdder();
+ final LongAdder docValues = new LongAdder();
+ final LongAdder storedFields = new LongAdder();
+ final LongAdder norms = new LongAdder();
+ final LongAdder payloads = new LongAdder();
+ final LongAdder termVectors = new LongAdder();
+ final LongAdder points = new LongAdder();
+ }
+
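+ // Per-session view: each flag records whether the corresponding data structure was accessed
+ // at least once while the session was open.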
+ static class PerField {
+ boolean terms;
+ boolean postings;
+ boolean termFrequencies;
+ boolean positions;
+ boolean offsets;
+ boolean docValues;
+ boolean storedFields;
+ boolean norms;
+ boolean payloads;
+ boolean termVectors;
+ boolean points;
+ }
+
+ public class FieldUsageStatsTrackingSession implements FieldUsageNotifier, Releasable {
+
+ private final Map<String, PerField> usages = new HashMap<>();
+
+ @Override
+ public void close() {
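+ // Fold the per-session flags into the shard-level counters; however often a field was
+ // accessed during this session, it is counted as a single use.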
+ usages.entrySet().stream().forEach(e -> {
+ InternalFieldStats fieldStats = perFieldStats.computeIfAbsent(e.getKey(), f -> new InternalFieldStats());
+ PerField pf = e.getValue();
+ boolean any = false;
+ boolean proximity = false;
+ if (pf.terms) {
+ any = true;
+ fieldStats.terms.increment();
+ }
+ if (pf.postings) {
+ any = true;
+ fieldStats.postings.increment();
+ }
+ if (pf.termFrequencies) {
+ any = true;
+ fieldStats.termFrequencies.increment();
+ }
+ if (pf.positions) {
+ any = true;
+ proximity = true;
+ fieldStats.positions.increment();
+ }
+ if (pf.offsets) {
+ any = true;
+ proximity = true;
+ fieldStats.offsets.increment();
+ }
+ if (pf.docValues) {
+ any = true;
+ fieldStats.docValues.increment();
+ }
+ if (pf.storedFields) {
+ any = true;
+ fieldStats.storedFields.increment();
+ }
+ if (pf.norms) {
+ any = true;
+ fieldStats.norms.increment();
+ }
+ if (pf.payloads) {
+ any = true;
+ proximity = true;
+ fieldStats.payloads.increment();
+ }
+ if (pf.points) {
+ any = true;
+ fieldStats.points.increment();
+ }
+ if (pf.termVectors) {
+ any = true;
+ fieldStats.termVectors.increment();
+ }
+ if (any) {
+ fieldStats.any.increment();
+ }
+ if (proximity) {
+ fieldStats.proximity.increment();
+ }
+ });
+ }
+
+ private PerField getOrAdd(String fieldName) {
+ Objects.requireNonNull(fieldName, "fieldName must be non-null");
+ return usages.computeIfAbsent(fieldName, k -> new PerField());
+ }
+
+ @Override
+ public void onTermsUsed(String field) {
+ getOrAdd(field).terms = true;
+ }
+
+ @Override
+ public void onPostingsUsed(String field) {
+ getOrAdd(field).postings = true;
+ }
+
+ @Override
+ public void onTermFrequenciesUsed(String field) {
+ getOrAdd(field).termFrequencies = true;
+ }
+
+ @Override
+ public void onPositionsUsed(String field) {
+ getOrAdd(field).positions = true;
+ }
+
+ @Override
+ public void onOffsetsUsed(String field) {
+ getOrAdd(field).offsets = true;
+ }
+
+ @Override
+ public void onDocValuesUsed(String field) {
+ getOrAdd(field).docValues = true;
+ }
+
+ @Override
+ public void onStoredFieldsUsed(String field) {
+ getOrAdd(field).storedFields = true;
+ }
+
+ @Override
+ public void onNormsUsed(String field) {
+ getOrAdd(field).norms = true;
+ }
+
+ @Override
+ public void onPayloadsUsed(String field) {
+ getOrAdd(field).payloads = true;
+ }
+
+ @Override
+ public void onPointsUsed(String field) {
+ getOrAdd(field).points = true;
+ }
+
+ @Override
+ public void onTermVectorsUsed(String field) {
+ getOrAdd(field).termVectors = true;
+ }
+ }
+}
diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
index f58781a13664d..26f544bbcc6e8 100644
--- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
+++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java
@@ -47,6 +47,7 @@
import org.elasticsearch.cluster.routing.RecoverySource.SnapshotRecoverySource;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider;
+import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
@@ -111,6 +112,8 @@
import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.recovery.RecoveryStats;
import org.elasticsearch.index.refresh.RefreshStats;
+import org.elasticsearch.index.search.stats.FieldUsageStats;
+import org.elasticsearch.index.search.stats.ShardFieldUsageTracker;
import org.elasticsearch.index.search.stats.SearchStats;
import org.elasticsearch.index.search.stats.ShardSearchStats;
import org.elasticsearch.index.seqno.ReplicationTracker;
@@ -144,6 +147,7 @@
import org.elasticsearch.repositories.RepositoriesService;
import org.elasticsearch.repositories.Repository;
import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.internal.FieldUsageTrackingDirectoryReader;
import org.elasticsearch.search.suggest.completion.CompletionStats;
import org.elasticsearch.threadpool.ThreadPool;
@@ -194,6 +198,9 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
private final Store store;
private final InternalIndexingStats internalIndexingStats;
private final ShardSearchStats searchStats = new ShardSearchStats();
+ private final ShardFieldUsageTracker fieldUsageTracker;
+ private final String shardUuid = UUIDs.randomBase64UUID();
+ private final long shardCreationTime;
private final ShardGetService getService;
private final ShardIndexWarmerService shardWarmerService;
private final ShardRequestCache requestCacheStats;
@@ -354,6 +361,8 @@ public IndexShard(
(retentionLeases, listener) -> retentionLeaseSyncer.sync(shardId, aId, getPendingPrimaryTerm(), retentionLeases, listener),
this::getSafeCommitInfo,
pendingReplicationActions);
+ fieldUsageTracker = new ShardFieldUsageTracker();
+ shardCreationTime = threadPool.absoluteTimeInMillis();
// the query cache is a node-level thing, however we want the most popular filters
// to be computed on a per-shard basis
@@ -443,6 +452,20 @@ public long getOperationPrimaryTerm() {
return replicationTracker.getOperationPrimaryTerm();
}
+ /**
+ * Returns a UUID that uniquely identifies this IndexShard instance
+ */
+ public String getShardUuid() {
+ return shardUuid;
+ }
+
+ /**
+ * Returns the timestamp at which this IndexShard instance was created
+ */
+ public long getShardCreationTime() {
+ return shardCreationTime;
+ }
+
/**
* Returns the latest cluster routing entry received with this shard.
*/
@@ -1094,6 +1117,10 @@ public SearchStats searchStats(String... groups) {
return searchStats.stats(groups);
}
+ public FieldUsageStats fieldUsageStats(String... fields) {
+ return fieldUsageTracker.stats(fields);
+ }
+
public GetStats getStats() {
return getService.stats();
}
@@ -1359,7 +1386,7 @@ private Engine.Searcher wrapSearcher(Engine.Searcher searcher) {
!= null : "DirectoryReader must be an instance or ElasticsearchDirectoryReader";
boolean success = false;
try {
- final Engine.Searcher newSearcher = readerWrapper == null ? searcher : wrapSearcher(searcher, readerWrapper);
+ final Engine.Searcher newSearcher = wrapSearcher(searcher, fieldUsageTracker.createSession(), readerWrapper);
assert newSearcher != null;
success = true;
return newSearcher;
@@ -1373,37 +1400,38 @@ private Engine.Searcher wrapSearcher(Engine.Searcher searcher) {
}
static Engine.Searcher wrapSearcher(Engine.Searcher engineSearcher,
- CheckedFunction<DirectoryReader, DirectoryReader, IOException> readerWrapper) throws IOException {
- assert readerWrapper != null;
+ ShardFieldUsageTracker.FieldUsageStatsTrackingSession fieldUsageStatsTrackingSession,
+ @Nullable CheckedFunction<DirectoryReader, DirectoryReader, IOException> readerWrapper)
+ throws IOException {
final ElasticsearchDirectoryReader elasticsearchDirectoryReader =
ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(engineSearcher.getDirectoryReader());
if (elasticsearchDirectoryReader == null) {
throw new IllegalStateException("Can't wrap non elasticsearch directory reader");
}
- NonClosingReaderWrapper nonClosingReaderWrapper = new NonClosingReaderWrapper(engineSearcher.getDirectoryReader());
- DirectoryReader reader = readerWrapper.apply(nonClosingReaderWrapper);
- if (reader != nonClosingReaderWrapper) {
- if (reader.getReaderCacheHelper() != elasticsearchDirectoryReader.getReaderCacheHelper()) {
- throw new IllegalStateException("wrapped directory reader doesn't delegate IndexReader#getCoreCacheKey," +
- " wrappers must override this method and delegate to the original readers core cache key. Wrapped readers can't be " +
- "used as cache keys since their are used only per request which would lead to subtle bugs");
- }
- if (ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(reader) != elasticsearchDirectoryReader) {
- // prevent that somebody wraps with a non-filter reader
- throw new IllegalStateException("wrapped directory reader hides actual ElasticsearchDirectoryReader but shouldn't");
- }
- }
-
- if (reader == nonClosingReaderWrapper) {
- return engineSearcher;
- } else {
- // we close the reader to make sure wrappers can release resources if needed....
- // our NonClosingReaderWrapper makes sure that our reader is not closed
- return new Engine.Searcher(engineSearcher.source(), reader,
- engineSearcher.getSimilarity(), engineSearcher.getQueryCache(), engineSearcher.getQueryCachingPolicy(),
- () -> IOUtils.close(reader, // this will close the wrappers excluding the NonClosingReaderWrapper
- engineSearcher)); // this will run the closeable on the wrapped engine reader
+ if (readerWrapper == null) {
+ readerWrapper = r -> r;
}
+ NonClosingReaderWrapper nonClosingReaderWrapper = new NonClosingReaderWrapper(engineSearcher.getDirectoryReader());
+ // first apply field usage stats wrapping before applying other wrappers so that it can track the effects of these wrappers
+ DirectoryReader reader = readerWrapper.apply(
+ new FieldUsageTrackingDirectoryReader(nonClosingReaderWrapper, fieldUsageStatsTrackingSession));
+ if (reader.getReaderCacheHelper() != elasticsearchDirectoryReader.getReaderCacheHelper()) {
+ throw new IllegalStateException("wrapped directory reader doesn't delegate IndexReader#getCoreCacheKey," +
+ " wrappers must override this method and delegate to the original readers core cache key. Wrapped readers can't be " +
+ "used as cache keys since their are used only per request which would lead to subtle bugs");
+ }
+ if (ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(reader) != elasticsearchDirectoryReader) {
+ // prevent somebody from wrapping with a non-filter reader
+ throw new IllegalStateException("wrapped directory reader hides actual ElasticsearchDirectoryReader but shouldn't");
+ }
+
+ // we close the reader to make sure wrappers can release resources if needed.
+ // our NonClosingReaderWrapper makes sure that our reader is not closed
+ return new Engine.Searcher(engineSearcher.source(), reader,
+ engineSearcher.getSimilarity(), engineSearcher.getQueryCache(), engineSearcher.getQueryCachingPolicy(),
+ () -> IOUtils.close(reader, // this will close the wrappers excluding the NonClosingReaderWrapper
+ engineSearcher, // this will run the closeable on the wrapped engine reader
+ fieldUsageStatsTrackingSession)); // completes stats recording
}
private static final class NonClosingReaderWrapper extends FilterDirectoryReader {
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFieldUsageStatsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFieldUsageStatsAction.java
new file mode 100644
index 0000000000000..0ea9bc80e4d2f
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestFieldUsageStatsAction.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.rest.action.admin.indices;
+
+import org.elasticsearch.action.admin.indices.stats.FieldUsageStatsAction;
+import org.elasticsearch.action.admin.indices.stats.FieldUsageStatsRequest;
+import org.elasticsearch.action.support.IndicesOptions;
+import org.elasticsearch.client.node.NodeClient;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.rest.BaseRestHandler;
+import org.elasticsearch.rest.RestHandler;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.rest.action.RestCancellableNodeClient;
+import org.elasticsearch.rest.action.RestToXContentListener;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+
+public class RestFieldUsageStatsAction extends BaseRestHandler {
+
+ @Override
+ public List<Route> routes() {
+ return Collections.singletonList(new RestHandler.Route(RestRequest.Method.GET, "/{index}/_field_usage_stats"));
+ }
+
+ @Override
+ public String getName() {
+ return "field_usage_stats_action";
+ }
+
+ @Override
+ public BaseRestHandler.RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
+ final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
+ final IndicesOptions indicesOptions = IndicesOptions.fromRequest(request, IndicesOptions.strictExpandOpenAndForbidClosed());
+ final FieldUsageStatsRequest fusRequest = new FieldUsageStatsRequest(indices, indicesOptions);
+ fusRequest.fields(request.paramAsStringArray("fields", fusRequest.fields()));
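+ // Use a cancellable client so that the stats request is aborted if the underlying HTTP channel closes.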
+ return channel -> {
+ final RestCancellableNodeClient cancelClient = new RestCancellableNodeClient(client, request.getHttpChannel());
+ cancelClient.execute(FieldUsageStatsAction.INSTANCE, fusRequest, new RestToXContentListener<>(channel));
+ };
+ }
+}
diff --git a/server/src/main/java/org/elasticsearch/search/internal/FieldUsageTrackingDirectoryReader.java b/server/src/main/java/org/elasticsearch/search/internal/FieldUsageTrackingDirectoryReader.java
new file mode 100644
index 0000000000000..34004f7a3ebc3
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/search/internal/FieldUsageTrackingDirectoryReader.java
@@ -0,0 +1,373 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.search.internal;
+
+import org.apache.lucene.codecs.StoredFieldsReader;
+import org.apache.lucene.index.BinaryDocValues;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.Fields;
+import org.apache.lucene.index.FilterDirectoryReader;
+import org.apache.lucene.index.ImpactsEnum;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.index.NumericDocValues;
+import org.apache.lucene.index.PointValues;
+import org.apache.lucene.index.PostingsEnum;
+import org.apache.lucene.index.SortedDocValues;
+import org.apache.lucene.index.SortedNumericDocValues;
+import org.apache.lucene.index.SortedSetDocValues;
+import org.apache.lucene.index.StoredFieldVisitor;
+import org.apache.lucene.index.Terms;
+import org.apache.lucene.index.TermsEnum;
+import org.apache.lucene.search.suggest.document.CompletionTerms;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.automaton.CompiledAutomaton;
+import org.elasticsearch.common.lucene.index.SequentialStoredFieldsLeafReader;
+import org.elasticsearch.index.fieldvisitor.FieldNamesProvidingStoredFieldsVisitor;
+
+import java.io.IOException;
+
+/**
+ * Wraps a DirectoryReader and tracks all access to fields, notifying a
+ * {@link FieldUsageNotifier} upon access.
+ */
+public class FieldUsageTrackingDirectoryReader extends FilterDirectoryReader {
+
+ private final FieldUsageNotifier notifier;
+
+ public FieldUsageTrackingDirectoryReader(DirectoryReader in, FieldUsageNotifier notifier) throws IOException {
+ super(in, new FilterDirectoryReader.SubReaderWrapper() {
+ @Override
+ public LeafReader wrap(LeafReader reader) {
+ return new FieldUsageTrackingLeafReader(reader, notifier);
+ }
+ });
+ this.notifier = notifier;
+ }
+
+ @Override
+ protected DirectoryReader doWrapDirectoryReader(DirectoryReader in) throws IOException {
+ return new FieldUsageTrackingDirectoryReader(in, notifier);
+ }
+
+ @Override
+ public CacheHelper getReaderCacheHelper() {
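+ // Delegate to the wrapped reader so that this per-request wrapper does not interfere with reader-level caching.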
+ return in.getReaderCacheHelper();
+ }
+
+ public interface FieldUsageNotifier {
+ void onTermsUsed(String field);
+ void onPostingsUsed(String field);
+ void onTermFrequenciesUsed(String field);
+ void onPositionsUsed(String field);
+ void onOffsetsUsed(String field);
+ void onDocValuesUsed(String field);
+ void onStoredFieldsUsed(String field);
+ void onNormsUsed(String field);
+ void onPayloadsUsed(String field);
+ void onPointsUsed(String field);
+ void onTermVectorsUsed(String field);
+ }
+
+ public static final class FieldUsageTrackingLeafReader extends SequentialStoredFieldsLeafReader {
+
+ private final FieldUsageNotifier notifier;
+
+ public FieldUsageTrackingLeafReader(LeafReader in, FieldUsageNotifier notifier) {
+ super(in);
+ this.notifier = notifier;
+ }
+
+ @Override
+ public Fields getTermVectors(int docID) throws IOException {
+ Fields f = super.getTermVectors(docID);
+ if (f != null) {
+ f = new FieldUsageTrackingTermVectorFields(f);
+ }
+ return f;
+ }
+
+ @Override
+ public PointValues getPointValues(String field) throws IOException {
+ PointValues pointValues = super.getPointValues(field);
+ if (pointValues != null) {
+ notifier.onPointsUsed(field);
+ }
+ return pointValues;
+ }
+
+ @Override
+ public void document(final int docID, final StoredFieldVisitor visitor) throws IOException {
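+ // Wrap the visitor so that every stored field it actually loads is reported as used,
+ // preferring the field-names-aware variant when available.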
+ if (visitor instanceof FieldNamesProvidingStoredFieldsVisitor) {
+ super.document(docID, new FieldUsageFieldsVisitor((FieldNamesProvidingStoredFieldsVisitor) visitor));
+ } else {
+ super.document(docID, new FieldUsageStoredFieldVisitor(visitor));
+ }
+ }
+
+ @Override
+ public Terms terms(String field) throws IOException {
+ Terms terms = super.terms(field);
+ if (terms != null) {
+ notifier.onTermsUsed(field);
+ // we can't wrap CompletionTerms, as CompletionWeight does an instanceof check.
+ // we also can't create a subclass of CompletionTerms as it is a final class.
+ // TODO: fix in Lucene
+ if (terms instanceof CompletionTerms == false) {
+ terms = new FieldUsageTrackingTerms(field, terms);
+ }
+ }
+ return terms;
+ }
+
+ @Override
+ public BinaryDocValues getBinaryDocValues(String field) throws IOException {
+ BinaryDocValues binaryDocValues = super.getBinaryDocValues(field);
+ if (binaryDocValues != null) {
+ notifier.onDocValuesUsed(field);
+ }
+ return binaryDocValues;
+ }
+
+ @Override
+ public SortedDocValues getSortedDocValues(String field) throws IOException {
+ SortedDocValues sortedDocValues = super.getSortedDocValues(field);
+ if (sortedDocValues != null) {
+ notifier.onDocValuesUsed(field);
+ }
+ return sortedDocValues;
+ }
+
+ @Override
+ public SortedNumericDocValues getSortedNumericDocValues(String field) throws IOException {
+ SortedNumericDocValues sortedNumericDocValues = super.getSortedNumericDocValues(field);
+ if (sortedNumericDocValues != null) {
+ notifier.onDocValuesUsed(field);
+ }
+ return sortedNumericDocValues;
+ }
+
+ @Override
+ public SortedSetDocValues getSortedSetDocValues(String field) throws IOException {
+ SortedSetDocValues sortedSetDocValues = super.getSortedSetDocValues(field);
+ if (sortedSetDocValues != null) {
+ notifier.onDocValuesUsed(field);
+ }
+ return sortedSetDocValues;
+ }
+
+ @Override
+ public NumericDocValues getNormValues(String field) throws IOException {
+ NumericDocValues numericDocValues = super.getNormValues(field);
+ if (numericDocValues != null) {
+ notifier.onNormsUsed(field);
+ }
+ return numericDocValues;
+ }
+
+ @Override
+ public String toString() {
+ final StringBuilder sb = new StringBuilder("FieldUsageTrackingLeafReader(reader=");
+ return sb.append(in).append(')').toString();
+ }
+
+ @Override
+ protected StoredFieldsReader doGetSequentialStoredFieldsReader(StoredFieldsReader reader) {
+ return new FieldUsageTrackingStoredFieldsReader(reader);
+ }
+
+ class FieldUsageTrackingStoredFieldsReader extends StoredFieldsReader {
+ final StoredFieldsReader reader;
+
+ FieldUsageTrackingStoredFieldsReader(StoredFieldsReader reader) {
+ this.reader = reader;
+ }
+
+ @Override
+ public void visitDocument(int docID, StoredFieldVisitor visitor) throws IOException {
+ reader.visitDocument(docID, new FieldUsageStoredFieldVisitor(visitor));
+ }
+
+ @Override
+ public StoredFieldsReader clone() {
+ return new FieldUsageTrackingStoredFieldsReader(reader.clone());
+ }
+
+ @Override
+ public StoredFieldsReader getMergeInstance() {
+ return new FieldUsageTrackingStoredFieldsReader(reader.getMergeInstance());
+ }
+
+ @Override
+ public void checkIntegrity() throws IOException {
+ reader.checkIntegrity();
+ }
+
+ @Override
+ public void close() throws IOException {
+ reader.close();
+ }
+
+ @Override
+ public long ramBytesUsed() {
+ return reader.ramBytesUsed();
+ }
+ }
+
+ private class FieldUsageTrackingTerms extends FilterTerms {
+
+ private final String field;
+
+ FieldUsageTrackingTerms(String field, Terms in) {
+ super(in);
+ this.field = field;
+ }
+
+ @Override
+ public TermsEnum iterator() throws IOException {
+ TermsEnum termsEnum = in.iterator();
+ if (termsEnum != null) {
+ termsEnum = new FieldUsageTrackingTermsEnum(field, termsEnum);
+ }
+ return termsEnum;
+ }
+
+ @Override
+ public TermsEnum intersect(CompiledAutomaton compiled, final BytesRef startTerm) throws IOException {
+ TermsEnum termsEnum = in.intersect(compiled, startTerm);
+ if (termsEnum != null) {
+ termsEnum = new FieldUsageTrackingTermsEnum(field, termsEnum);
+ }
+ return termsEnum;
+ }
+
+ @Override
+ public long getSumTotalTermFreq() throws IOException {
+ long totalTermFreq = super.getSumTotalTermFreq();
+ notifier.onTermFrequenciesUsed(field);
+ return totalTermFreq;
+ }
+
+ @Override
+ public long getSumDocFreq() throws IOException {
+ return in.getSumDocFreq();
+ }
+ }
+
+ private class FieldUsageTrackingTermsEnum extends FilterTermsEnum {
+
+ private final String field;
+
+ FieldUsageTrackingTermsEnum(String field, TermsEnum in) {
+ super(in);
+ this.field = field;
+ }
+
+ @Override
+ public long totalTermFreq() throws IOException {
+ long totalTermFreq = super.totalTermFreq();
+ notifier.onTermFrequenciesUsed(field);
+ return totalTermFreq;
+ }
+
+ @Override
+ public PostingsEnum postings(PostingsEnum reuse, int flags) throws IOException {
+ PostingsEnum postingsEnum = super.postings(reuse, flags);
+ if (postingsEnum != null) {
+ notifier.onPostingsUsed(field);
+ checkPostingsFlags(flags);
+ }
+ return postingsEnum;
+ }
+
+ @Override
+ public ImpactsEnum impacts(int flags) throws IOException {
+ ImpactsEnum impactsEnum = super.impacts(flags);
+ if (impactsEnum != null) {
+ notifier.onPostingsUsed(field);
+ checkPostingsFlags(flags);
+ }
+ return impactsEnum;
+ }
+
+ private void checkPostingsFlags(int flags) {
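+ // Record each postings feature that the consumer explicitly requested via the flags.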
+ if (PostingsEnum.featureRequested(flags, PostingsEnum.FREQS)) {
+ notifier.onTermFrequenciesUsed(field);
+ }
+ if (PostingsEnum.featureRequested(flags, PostingsEnum.POSITIONS)) {
+ notifier.onPositionsUsed(field);
+ }
+ if (PostingsEnum.featureRequested(flags, PostingsEnum.OFFSETS)) {
+ notifier.onOffsetsUsed(field);
+ }
+ if (PostingsEnum.featureRequested(flags, PostingsEnum.PAYLOADS)) {
+ notifier.onPayloadsUsed(field);
+ }
+ }
+
+ }
+
+ private class FieldUsageTrackingTermVectorFields extends FilterFields {
+
+ FieldUsageTrackingTermVectorFields(Fields in) {
+ super(in);
+ }
+
+ @Override
+ public Terms terms(String field) throws IOException {
+ Terms terms = super.terms(field);
+ if (terms != null) {
+ notifier.onTermVectorsUsed(field);
+ }
+ return terms;
+ }
+
+ }
+
+ private class FieldUsageStoredFieldVisitor extends FilterStoredFieldVisitor {
+ FieldUsageStoredFieldVisitor(StoredFieldVisitor visitor) {
+ super(visitor);
+ }
+
+ @Override
+ public Status needsField(FieldInfo fieldInfo) throws IOException {
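+ // Only count stored fields that the delegate visitor actually wants to load.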
+ Status status = super.needsField(fieldInfo);
+ if (status == Status.YES) {
+ notifier.onStoredFieldsUsed(fieldInfo.name);
+ }
+ return status;
+ }
+ }
+
+ private class FieldUsageFieldsVisitor extends FilterFieldNamesProvidingStoredFieldsVisitor {
+ FieldUsageFieldsVisitor(FieldNamesProvidingStoredFieldsVisitor visitor) {
+ super(visitor);
+ }
+
+ @Override
+ public Status needsField(FieldInfo fieldInfo) throws IOException {
+ Status status = super.needsField(fieldInfo);
+ if (status == Status.YES) {
+ notifier.onStoredFieldsUsed(fieldInfo.name);
+ }
+ return status;
+ }
+ }
+
+ @Override
+ public CacheHelper getCoreCacheHelper() {
+ return in.getCoreCacheHelper();
+ }
+
+ @Override
+ public CacheHelper getReaderCacheHelper() {
+ return in.getReaderCacheHelper();
+ }
+ }
+}
diff --git a/server/src/main/java/org/elasticsearch/search/internal/FilterFieldNamesProvidingStoredFieldsVisitor.java b/server/src/main/java/org/elasticsearch/search/internal/FilterFieldNamesProvidingStoredFieldsVisitor.java
new file mode 100644
index 0000000000000..329a746f075a8
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/search/internal/FilterFieldNamesProvidingStoredFieldsVisitor.java
@@ -0,0 +1,64 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.search.internal;
+
+import org.apache.lucene.index.FieldInfo;
+import org.elasticsearch.index.fieldvisitor.FieldNamesProvidingStoredFieldsVisitor;
+
+import java.io.IOException;
+import java.util.Set;
+
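+/**
+ * A {@link FieldNamesProvidingStoredFieldsVisitor} that delegates all calls to another
+ * {@link FieldNamesProvidingStoredFieldsVisitor}.
+ */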
+public class FilterFieldNamesProvidingStoredFieldsVisitor extends FieldNamesProvidingStoredFieldsVisitor {
+
+ private final FieldNamesProvidingStoredFieldsVisitor visitor;
+
+ public FilterFieldNamesProvidingStoredFieldsVisitor(FieldNamesProvidingStoredFieldsVisitor visitor) {
+ this.visitor = visitor;
+ }
+
+ @Override
+ public void binaryField(FieldInfo fieldInfo, byte[] value) throws IOException {
+ visitor.binaryField(fieldInfo, value);
+ }
+
+ @Override
+ public void stringField(FieldInfo fieldInfo, byte[] value) throws IOException {
+ visitor.stringField(fieldInfo, value);
+ }
+
+ @Override
+ public void intField(FieldInfo fieldInfo, int value) throws IOException {
+ visitor.intField(fieldInfo, value);
+ }
+
+ @Override
+ public void longField(FieldInfo fieldInfo, long value) throws IOException {
+ visitor.longField(fieldInfo, value);
+ }
+
+ @Override
+ public void floatField(FieldInfo fieldInfo, float value) throws IOException {
+ visitor.floatField(fieldInfo, value);
+ }
+
+ @Override
+ public void doubleField(FieldInfo fieldInfo, double value) throws IOException {
+ visitor.doubleField(fieldInfo, value);
+ }
+
+ @Override
+ public Status needsField(FieldInfo fieldInfo) throws IOException {
+ return visitor.needsField(fieldInfo);
+ }
+
+ @Override
+ public Set<String> getFieldNames() {
+ return visitor.getFieldNames();
+ }
+}
diff --git a/server/src/main/java/org/elasticsearch/search/internal/FilterStoredFieldVisitor.java b/server/src/main/java/org/elasticsearch/search/internal/FilterStoredFieldVisitor.java
new file mode 100644
index 0000000000000..a695fae27346d
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/search/internal/FilterStoredFieldVisitor.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.search.internal;
+
+import org.apache.lucene.index.FieldInfo;
+import org.apache.lucene.index.StoredFieldVisitor;
+
+import java.io.IOException;
+
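+/**
+ * A {@link StoredFieldVisitor} that delegates all calls to another {@link StoredFieldVisitor}.
+ */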
+public class FilterStoredFieldVisitor extends StoredFieldVisitor {
+ private final StoredFieldVisitor visitor;
+
+ public FilterStoredFieldVisitor(StoredFieldVisitor visitor) {
+ this.visitor = visitor;
+ }
+
+ @Override
+ public void binaryField(FieldInfo fieldInfo, byte[] value) throws IOException {
+ visitor.binaryField(fieldInfo, value);
+ }
+
+ @Override
+ public void stringField(FieldInfo fieldInfo, byte[] value) throws IOException {
+ visitor.stringField(fieldInfo, value);
+ }
+
+ @Override
+ public void intField(FieldInfo fieldInfo, int value) throws IOException {
+ visitor.intField(fieldInfo, value);
+ }
+
+ @Override
+ public void longField(FieldInfo fieldInfo, long value) throws IOException {
+ visitor.longField(fieldInfo, value);
+ }
+
+ @Override
+ public void floatField(FieldInfo fieldInfo, float value) throws IOException {
+ visitor.floatField(fieldInfo, value);
+ }
+
+ @Override
+ public void doubleField(FieldInfo fieldInfo, double value) throws IOException {
+ visitor.doubleField(fieldInfo, value);
+ }
+
+ @Override
+ public Status needsField(FieldInfo fieldInfo) throws IOException {
+ return visitor.needsField(fieldInfo);
+ }
+}
diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexReaderWrapperTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexReaderWrapperTests.java
index cc83ff734f051..22f7f533e738a 100644
--- a/server/src/test/java/org/elasticsearch/index/shard/IndexReaderWrapperTests.java
+++ b/server/src/test/java/org/elasticsearch/index/shard/IndexReaderWrapperTests.java
@@ -26,6 +26,8 @@
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.search.stats.ShardFieldUsageTracker;
+import org.elasticsearch.search.internal.FieldUsageTrackingDirectoryReader;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
@@ -34,6 +36,9 @@
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
+import static org.hamcrest.Matchers.instanceOf;
+import static org.mockito.Mockito.mock;
+
public class IndexReaderWrapperTests extends ESTestCase {
public void testReaderCloseListenerIsCalled() throws IOException {
@@ -56,7 +61,7 @@ public void testReaderCloseListenerIsCalled() throws IOException {
final AtomicBoolean closeCalled = new AtomicBoolean(false);
final Engine.Searcher wrap = IndexShard.wrapSearcher(new Engine.Searcher("foo", open,
IndexSearcher.getDefaultSimilarity(), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(),
- () -> closeCalled.set(true)), wrapper);
+ () -> closeCalled.set(true)), mock(ShardFieldUsageTracker.FieldUsageStatsTrackingSession.class), wrapper);
assertEquals(1, wrap.getIndexReader().getRefCount());
ElasticsearchDirectoryReader.addReaderCloseListener(wrap.getDirectoryReader(), key -> {
if (key == open.getReaderCacheHelper().getKey()) {
@@ -97,7 +102,7 @@ public void testIsCacheable() throws IOException {
AtomicBoolean closeCalled = new AtomicBoolean(false);
try (Engine.Searcher wrap = IndexShard.wrapSearcher(new Engine.Searcher("foo", open,
IndexSearcher.getDefaultSimilarity(), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(),
- () -> closeCalled.set(true)), wrapper)) {
+ () -> closeCalled.set(true)), mock(ShardFieldUsageTracker.FieldUsageStatsTrackingSession.class), wrapper)) {
ElasticsearchDirectoryReader.addReaderCloseListener(wrap.getDirectoryReader(), key -> {
cache.remove(key);
});
@@ -113,7 +118,7 @@ public void testIsCacheable() throws IOException {
assertEquals(1, closeCalls.get());
}
- public void testNoWrap() throws IOException {
+ public void testAlwaysWrapWithFieldUsageTrackingDirectoryReader() throws IOException {
Directory dir = newDirectory();
IndexWriterConfig iwc = newIndexWriterConfig();
IndexWriter writer = new IndexWriter(dir, iwc);
@@ -128,9 +133,11 @@ public void testNoWrap() throws IOException {
CheckedFunction<DirectoryReader, DirectoryReader, IOException> wrapper = directoryReader -> directoryReader;
try (Engine.Searcher engineSearcher = IndexShard.wrapSearcher(new Engine.Searcher("foo", open,
IndexSearcher.getDefaultSimilarity(), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(),
- open::close), wrapper)) {
- final Engine.Searcher wrap = IndexShard.wrapSearcher(engineSearcher, wrapper);
- assertSame(wrap, engineSearcher);
+ open::close), mock(ShardFieldUsageTracker.FieldUsageStatsTrackingSession.class), wrapper)) {
+ final Engine.Searcher wrap = IndexShard.wrapSearcher(engineSearcher,
+ mock(ShardFieldUsageTracker.FieldUsageStatsTrackingSession.class), wrapper);
+ assertNotSame(wrap, engineSearcher);
+ assertThat(wrap.getDirectoryReader(), instanceOf(FieldUsageTrackingDirectoryReader.class));
}
IOUtils.close(writer, dir);
}
diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/SearcherHelper.java b/test/framework/src/main/java/org/elasticsearch/index/shard/SearcherHelper.java
index c4db0e921fb1b..c3b77eab1cbd2 100644
--- a/test/framework/src/main/java/org/elasticsearch/index/shard/SearcherHelper.java
+++ b/test/framework/src/main/java/org/elasticsearch/index/shard/SearcherHelper.java
@@ -11,16 +11,20 @@
import org.apache.lucene.index.DirectoryReader;
import org.elasticsearch.core.CheckedFunction;
import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.search.stats.ShardFieldUsageTracker;
import java.io.IOException;
import java.io.UncheckedIOException;
+import static org.mockito.Mockito.mock;
+
public class SearcherHelper {
public static Engine.Searcher wrapSearcher(Engine.Searcher engineSearcher,
CheckedFunction<DirectoryReader, DirectoryReader, IOException> readerWrapper) {
try {
- return IndexShard.wrapSearcher(engineSearcher, readerWrapper);
+ return IndexShard.wrapSearcher(engineSearcher, mock(ShardFieldUsageTracker.FieldUsageStatsTrackingSession.class),
+ readerWrapper);
} catch (IOException e) {
throw new UncheckedIOException(e);
}
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForNoFollowersStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForNoFollowersStepTests.java
index 911daaa319cd1..d0db4d6545e7c 100644
--- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForNoFollowersStepTests.java
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForNoFollowersStepTests.java
@@ -237,8 +237,7 @@ private ShardStats randomShardStats(boolean isLeaderIndex) {
null,
null,
null,
- randomRetentionLeaseStats(isLeaderIndex)
- );
+ randomRetentionLeaseStats(isLeaderIndex));
}
private RetentionLeaseStats randomRetentionLeaseStats(boolean isLeaderIndex) {
diff --git a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java
index 4e0e94583a46b..a5897ee3540e5 100644
--- a/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java
+++ b/x-pack/plugin/security/qa/operator-privileges-tests/src/javaRestTest/java/org/elasticsearch/xpack/security/operator/Constants.java
@@ -390,6 +390,7 @@ public class Constants {
"indices:monitor/shard_stores",
"indices:monitor/stats",
"indices:monitor/upgrade",
+ "indices:monitor/field_usage_stats",
"internal:admin/ccr/internal_repository/delete",
"internal:admin/ccr/internal_repository/put",
"internal:admin/ccr/restore/file_chunk/get",