diff --git a/docs/reference/cluster/stats.asciidoc b/docs/reference/cluster/stats.asciidoc
index 50eec803053c2..cc9b41f8b96df 100644
--- a/docs/reference/cluster/stats.asciidoc
+++ b/docs/reference/cluster/stats.asciidoc
@@ -438,6 +438,19 @@ The API returns the following response:
       "fixed_bit_set_memory_in_bytes": 0,
       "max_unsafe_auto_id_timestamp" : -9223372036854775808,
       "file_sizes": {}
+    },
+    "mappings": {
+      "field_types": []
+    },
+    "analysis": {
+      "char_filter_types": [],
+      "tokenizer_types": [],
+      "filter_types": [],
+      "analyzer_types": [],
+      "built_in_char_filters": [],
+      "built_in_tokenizers": [],
+      "built_in_filters": [],
+      "built_in_analyzers": []
     }
   },
   "nodes": {
@@ -554,6 +567,7 @@ The API returns the following response:
 // TESTRESPONSE[s/"processor_stats": \{[^\}]*\}/"processor_stats": $body.$_path/]
 // TESTRESPONSE[s/"count": \{[^\}]*\}/"count": $body.$_path/]
 // TESTRESPONSE[s/"packaging_types": \[[^\]]*\]/"packaging_types": $body.$_path/]
+// TESTRESPONSE[s/"field_types": \[[^\]]*\]/"field_types": $body.$_path/]
 // TESTRESPONSE[s/: true|false/: $body.$_path/]
 // TESTRESPONSE[s/: (\-)?[0-9]+/: $body.$_path/]
 // TESTRESPONSE[s/: "[^"]*"/: $body.$_path/]
diff --git a/modules/analysis-common/src/test/resources/rest-api-spec/test/cluster.stats/10_analysis_stats.yml b/modules/analysis-common/src/test/resources/rest-api-spec/test/cluster.stats/10_analysis_stats.yml
new file mode 100644
index 0000000000000..c9f08b5652ed2
--- /dev/null
+++ b/modules/analysis-common/src/test/resources/rest-api-spec/test/cluster.stats/10_analysis_stats.yml
@@ -0,0 +1,122 @@
+---
+"get cluster stats returns analysis stats":
+
+  - skip:
+      version: " - 7.99.99" # TODO: 7.6.99
+      reason: "analysis stats are added for v7.7.0"
+
+  - do:
+      cluster.stats: {}
+
+  - length: { indices.analysis.char_filter_types: 0 }
+  - length: { indices.analysis.tokenizer_types: 0 }
+  - length: { indices.analysis.filter_types: 0 }
+  - length: { indices.analysis.analyzer_types: 0 }
+
+  - length: { indices.analysis.built_in_char_filters: 0 }
+  - length: { indices.analysis.built_in_tokenizers: 0 }
+  - length: { indices.analysis.built_in_filters: 0 }
+  - length: { indices.analysis.built_in_analyzers: 0 }
+
+  - do:
+      indices.create:
+        index: test-index1
+        body:
+          settings:
+            analysis:
+              char_filter:
+                c:
+                  type: mapping
+                  mappings: [ "a => b" ]
+              tokenizer:
+                tok:
+                  type: pattern
+                  pattern: ","
+              filter:
+                st:
+                  type: stop
+                  stopwords: [ "a" ]
+                st2:
+                  type: stop
+                  stopwords: [ "b" ]
+              analyzer:
+                en:
+                  type: standard
+                  stopwords: "_english_"
+                cust:
+                  char_filter: [ "html_strip" ]
+                  tokenizer: "keyword"
+                  filter: [ "trim" ]
+          mappings:
+            properties:
+              message:
+                type: "text"
+                analyzer: french
+                search_analyzer: spanish
+                search_quote_analyzer: german
+              description:
+                type: "text"
+                analyzer: french
+
+  - do:
+      indices.create:
+        index: test-index2
+        body:
+          mappings:
+            properties:
+              message:
+                type: "text"
+                analyzer: spanish
+
+  - do:
+      cluster.stats: {}
+
+  - length: { indices.analysis.char_filter_types: 1 }
+  - match: { indices.analysis.char_filter_types.0.name: mapping }
+  - match: { indices.analysis.char_filter_types.0.count: 1 }
+  - match: { indices.analysis.char_filter_types.0.index_count: 1 }
+
+  - length: { indices.analysis.tokenizer_types: 1 }
+  - match: { indices.analysis.tokenizer_types.0.name: pattern }
+  - match: { indices.analysis.tokenizer_types.0.count: 1 }
+  - match: { indices.analysis.tokenizer_types.0.index_count: 1 }
+
+  - length: { indices.analysis.filter_types: 1 }
+  - match: { indices.analysis.filter_types.0.name: stop }
+  - match: { indices.analysis.filter_types.0.count: 2 }
+  - match: { indices.analysis.filter_types.0.index_count: 1 }
+
+  - length: { indices.analysis.analyzer_types: 2 }
+  - match: { indices.analysis.analyzer_types.0.name: custom }
+  - match: { indices.analysis.analyzer_types.0.count: 1 }
+  - match: { indices.analysis.analyzer_types.0.index_count: 1 }
+  - match: { indices.analysis.analyzer_types.1.name: standard }
+  - match: { indices.analysis.analyzer_types.1.count: 1 }
+  - match: { indices.analysis.analyzer_types.1.index_count: 1 }
+
+  - length: { indices.analysis.built_in_char_filters: 1 }
+  - match: { indices.analysis.built_in_char_filters.0.name: html_strip }
+  - match: { indices.analysis.built_in_char_filters.0.count: 1 }
+  - match: { indices.analysis.built_in_char_filters.0.index_count: 1 }
+
+  - length: { indices.analysis.built_in_tokenizers: 1 }
+  - match: { indices.analysis.built_in_tokenizers.0.name: keyword }
+  - match: { indices.analysis.built_in_tokenizers.0.count: 1 }
+  - match: { indices.analysis.built_in_tokenizers.0.index_count: 1 }
+
+  - length: { indices.analysis.built_in_filters: 1 }
+  - match: { indices.analysis.built_in_filters.0.name: trim }
+  - match: { indices.analysis.built_in_filters.0.count: 1 }
+  - match: { indices.analysis.built_in_filters.0.index_count: 1 }
+
+  - length: { indices.analysis.built_in_analyzers: 3 }
+  - match: { indices.analysis.built_in_analyzers.0.name: french }
+  - match: { indices.analysis.built_in_analyzers.0.count: 2 }
+  - match: { indices.analysis.built_in_analyzers.0.index_count: 1 }
+  - match: { indices.analysis.built_in_analyzers.1.name: german }
+  - match: { indices.analysis.built_in_analyzers.1.count: 1 }
+  - match: { indices.analysis.built_in_analyzers.1.index_count: 1 }
+  - match: { indices.analysis.built_in_analyzers.2.name: spanish }
+  - match: { indices.analysis.built_in_analyzers.2.count: 2 }
+  - match: { indices.analysis.built_in_analyzers.2.index_count: 2 }
+
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.stats/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.stats/10_basic.yml
index c58d959c934ca..520d896eb126c 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.stats/10_basic.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.stats/10_basic.yml
@@ -83,3 +83,57 @@
       cluster.stats: {}
 
   - is_true: nodes.packaging_types
+
+---
+"get cluster stats returns mapping stats":
+
+  - skip:
+      version: " - 7.99.99" # TODO: 7.6.99
+      reason: "mapping stats are added for v7.7.0"
+
+  - do:
+      cluster.stats: {}
+
+  - length: { indices.mappings.field_types: 0 }
+
+  - do:
+      indices.create:
+        index: test-index1
+        body:
+          mappings:
+            properties:
+              foo:
+                type: keyword
+
+  - do:
+      indices.create:
+        index: test-index2
+        body:
+          mappings:
+            properties:
+              foo:
+                type: keyword
+              bar:
+                properties:
+                  quux:
+                    type: integer
+                  baz:
+                    type: keyword
+
+  - do:
+      cluster.stats: {}
+
+  - length: { indices.mappings.field_types: 3 }
+
+  - match: { indices.mappings.field_types.0.name: integer }
+  - match: { indices.mappings.field_types.0.count: 1 }
+  - match: { indices.mappings.field_types.0.index_count: 1 }
+
+  - match: { indices.mappings.field_types.1.name: keyword }
+  - match: { indices.mappings.field_types.1.count: 3 }
+  - match: { indices.mappings.field_types.1.index_count: 2 }
+
+  - match: { indices.mappings.field_types.2.name: object }
+  - match: { indices.mappings.field_types.2.count: 1 }
+  - match: { indices.mappings.field_types.2.index_count: 1 }
+
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStats.java
new file mode 100644
index 0000000000000..884ce6a36175c
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStats.java
@@ -0,0 +1,319 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.stats;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.ToXContentFragment;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+
+/**
+ * Statistics about analysis usage.
+ */
+public final class AnalysisStats implements ToXContentFragment, Writeable {
+
+    /**
+     * Create {@link AnalysisStats} from the given cluster state.
+     */
+    public static AnalysisStats of(ClusterState state) {
+        final Map<String, IndexFeatureStats> usedCharFilterTypes = new HashMap<>();
+        final Map<String, IndexFeatureStats> usedTokenizerTypes = new HashMap<>();
+        final Map<String, IndexFeatureStats> usedTokenFilterTypes = new HashMap<>();
+        final Map<String, IndexFeatureStats> usedAnalyzerTypes = new HashMap<>();
+        final Map<String, IndexFeatureStats> usedBuiltInCharFilters = new HashMap<>();
+        final Map<String, IndexFeatureStats> usedBuiltInTokenizers = new HashMap<>();
+        final Map<String, IndexFeatureStats> usedBuiltInTokenFilters = new HashMap<>();
+        final Map<String, IndexFeatureStats> usedBuiltInAnalyzers = new HashMap<>();
+
+        for (IndexMetaData indexMetaData : state.metaData()) {
+            Set<String> indexAnalyzers = new HashSet<>();
+            MappingMetaData mappingMetaData = indexMetaData.mapping();
+            if (mappingMetaData != null) {
+                MappingVisitor.visitMapping(mappingMetaData.getSourceAsMap(), fieldMapping -> {
+                    for (String key : new String[] { "analyzer", "search_analyzer", "search_quote_analyzer" }) {
+                        Object analyzerO = fieldMapping.get(key);
+                        if (analyzerO != null) {
+                            final String analyzer = analyzerO.toString();
+                            IndexFeatureStats stats = usedBuiltInAnalyzers.computeIfAbsent(analyzer, IndexFeatureStats::new);
+                            stats.count++;
+                            if (indexAnalyzers.add(analyzer)) {
+                                stats.indexCount++;
+                            }
+                        }
+                    }
+                });
+            }
+
+            Set<String> indexCharFilters = new HashSet<>();
+            Set<String> indexTokenizers = new HashSet<>();
+            Set<String> indexTokenFilters = new HashSet<>();
+
+            Set<String> indexAnalyzerTypes = new HashSet<>();
+            Set<String> indexCharFilterTypes = new HashSet<>();
+            Set<String> indexTokenizerTypes = new HashSet<>();
+            Set<String> indexTokenFilterTypes = new HashSet<>();
+
+            Settings indexSettings = indexMetaData.getSettings();
+            Map<String, Settings> analyzerSettings = indexSettings.getGroups("index.analysis.analyzer");
+            usedBuiltInAnalyzers.keySet().removeAll(analyzerSettings.keySet());
+            for (Settings analyzerSetting : analyzerSettings.values()) {
+                final String analyzerType = analyzerSetting.get("type", "custom");
+                IndexFeatureStats stats = usedAnalyzerTypes.computeIfAbsent(analyzerType, IndexFeatureStats::new);
+                stats.count++;
+                if (indexAnalyzerTypes.add(analyzerType)) {
+                    stats.indexCount++;
+                }
+
+                for (String charFilter : analyzerSetting.getAsList("char_filter")) {
+                    stats = usedBuiltInCharFilters.computeIfAbsent(charFilter, IndexFeatureStats::new);
+                    stats.count++;
+                    if (indexCharFilters.add(charFilter)) {
+                        stats.indexCount++;
+                    }
+                }
+
+                String tokenizer = analyzerSetting.get("tokenizer");
+                if (tokenizer != null) {
+                    stats = usedBuiltInTokenizers.computeIfAbsent(tokenizer, IndexFeatureStats::new);
+                    stats.count++;
+                    if (indexTokenizers.add(tokenizer)) {
+                        stats.indexCount++;
+                    }
+                }
+
+                for (String filter : analyzerSetting.getAsList("filter")) {
+                    stats = usedBuiltInTokenFilters.computeIfAbsent(filter, IndexFeatureStats::new);
+                    stats.count++;
+                    if (indexTokenFilters.add(filter)) {
+                        stats.indexCount++;
+                    }
+                }
+            }
+
+            Map<String, Settings> charFilterSettings = indexSettings.getGroups("index.analysis.char_filter");
+            usedBuiltInCharFilters.keySet().removeAll(charFilterSettings.keySet());
+            aggregateAnalysisTypes(charFilterSettings.values(), usedCharFilterTypes, indexCharFilterTypes);
+
+            Map<String, Settings> tokenizerSettings = indexSettings.getGroups("index.analysis.tokenizer");
+            usedBuiltInTokenizers.keySet().removeAll(tokenizerSettings.keySet());
+            aggregateAnalysisTypes(tokenizerSettings.values(), usedTokenizerTypes, indexTokenizerTypes);
+
+            Map<String, Settings> tokenFilterSettings = indexSettings.getGroups("index.analysis.filter");
+            usedBuiltInTokenFilters.keySet().removeAll(tokenFilterSettings.keySet());
+            aggregateAnalysisTypes(tokenFilterSettings.values(), usedTokenFilterTypes, indexTokenFilterTypes);
+        }
+        return new
AnalysisStats(usedCharFilterTypes.values(), usedTokenizerTypes.values(), usedTokenFilterTypes.values(), + usedAnalyzerTypes.values(), usedBuiltInCharFilters.values(), usedBuiltInTokenizers.values(), + usedBuiltInTokenFilters.values(), usedBuiltInAnalyzers.values()); + } + + private static void aggregateAnalysisTypes( + Collection settings, + Map stats, + Set indexTypes) { + for (Settings analysisComponentSettings : settings) { + final String type = analysisComponentSettings.get("type"); + if (type != null) { + IndexFeatureStats s = stats.computeIfAbsent(type, IndexFeatureStats::new); + s.count++; + if (indexTypes.add(type)) { + s.indexCount++; + } + } + } + } + + private static Set sort(Collection set) { + List list = new ArrayList<>(set); + list.sort(Comparator.comparing(IndexFeatureStats::getName)); + return Collections.unmodifiableSet(new LinkedHashSet<>(list)); + } + + private final Set usedCharFilters, usedTokenizers, usedTokenFilters, usedAnalyzers; + private final Set usedBuiltInCharFilters, usedBuiltInTokenizers, usedBuiltInTokenFilters, usedBuiltInAnalyzers; + + AnalysisStats( + Collection usedCharFilters, + Collection usedTokenizers, + Collection usedTokenFilters, + Collection usedAnalyzers, + Collection usedBuiltInCharFilters, + Collection usedBuiltInTokenizers, + Collection usedBuiltInTokenFilters, + Collection usedBuiltInAnalyzers) { + this.usedCharFilters = sort(usedCharFilters); + this.usedTokenizers = sort(usedTokenizers); + this.usedTokenFilters = sort(usedTokenFilters); + this.usedAnalyzers = sort(usedAnalyzers); + this.usedBuiltInCharFilters = sort(usedBuiltInCharFilters); + this.usedBuiltInTokenizers = sort(usedBuiltInTokenizers); + this.usedBuiltInTokenFilters = sort(usedBuiltInTokenFilters); + this.usedBuiltInAnalyzers = sort(usedBuiltInAnalyzers); + } + + public AnalysisStats(StreamInput input) throws IOException { + usedCharFilters = Collections.unmodifiableSet(new LinkedHashSet<>(input.readList(IndexFeatureStats::new))); + usedTokenizers = Collections.unmodifiableSet(new LinkedHashSet<>(input.readList(IndexFeatureStats::new))); + usedTokenFilters = Collections.unmodifiableSet(new LinkedHashSet<>(input.readList(IndexFeatureStats::new))); + usedAnalyzers = Collections.unmodifiableSet(new LinkedHashSet<>(input.readList(IndexFeatureStats::new))); + usedBuiltInCharFilters = Collections.unmodifiableSet(new LinkedHashSet<>(input.readList(IndexFeatureStats::new))); + usedBuiltInTokenizers = Collections.unmodifiableSet(new LinkedHashSet<>(input.readList(IndexFeatureStats::new))); + usedBuiltInTokenFilters = Collections.unmodifiableSet(new LinkedHashSet<>(input.readList(IndexFeatureStats::new))); + usedBuiltInAnalyzers = Collections.unmodifiableSet(new LinkedHashSet<>(input.readList(IndexFeatureStats::new))); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeCollection(usedCharFilters); + out.writeCollection(usedTokenizers); + out.writeCollection(usedTokenFilters); + out.writeCollection(usedAnalyzers); + out.writeCollection(usedBuiltInCharFilters); + out.writeCollection(usedBuiltInTokenizers); + out.writeCollection(usedBuiltInTokenFilters); + out.writeCollection(usedBuiltInAnalyzers); + } + + /** + * Return the set of used char filters in the cluster. + */ + public Set getUsedCharFilterTypes() { + return usedCharFilters; + } + + /** + * Return the set of used tokenizers in the cluster. + */ + public Set getUsedTokenizerTypes() { + return usedTokenizers; + } + + /** + * Return the set of used token filters in the cluster. 
+ */ + public Set getUsedTokenFilterTypes() { + return usedTokenFilters; + } + + /** + * Return the set of used analyzers in the cluster. + */ + public Set getUsedAnalyzerTypes() { + return usedAnalyzers; + } + + /** + * Return the set of used built-in char filters in the cluster. + */ + public Set getUsedBuiltInCharFilters() { + return usedBuiltInCharFilters; + } + + /** + * Return the set of used built-in tokenizers in the cluster. + */ + public Set getUsedBuiltInTokenizers() { + return usedBuiltInTokenizers; + } + + /** + * Return the set of used built-in token filters in the cluster. + */ + public Set getUsedBuiltInTokenFilters() { + return usedBuiltInTokenFilters; + } + + /** + * Return the set of used built-in analyzers in the cluster. + */ + public Set getUsedBuiltInAnalyzers() { + return usedBuiltInAnalyzers; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AnalysisStats that = (AnalysisStats) o; + return Objects.equals(usedCharFilters, that.usedCharFilters) && + Objects.equals(usedTokenizers, that.usedTokenizers) && + Objects.equals(usedTokenFilters, that.usedTokenFilters) && + Objects.equals(usedAnalyzers, that.usedAnalyzers) && + Objects.equals(usedBuiltInCharFilters, that.usedBuiltInCharFilters) && + Objects.equals(usedBuiltInTokenizers, that.usedBuiltInTokenizers) && + Objects.equals(usedBuiltInTokenFilters, that.usedBuiltInTokenFilters) && + Objects.equals(usedBuiltInAnalyzers, that.usedBuiltInAnalyzers); + } + + @Override + public int hashCode() { + return Objects.hash(usedCharFilters, usedTokenizers, usedTokenFilters, usedAnalyzers, usedBuiltInCharFilters, + usedBuiltInTokenizers, usedBuiltInTokenFilters, usedBuiltInAnalyzers); + } + + private void toXContentCollection(XContentBuilder builder, Params params, String name, Collection coll) + throws IOException { + builder.startArray(name); + for (ToXContent toXContent : coll) { + toXContent.toXContent(builder, params); + } + builder.endArray(); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("analysis"); + toXContentCollection(builder, params, "char_filter_types", usedCharFilters); + toXContentCollection(builder, params, "tokenizer_types", usedTokenizers); + toXContentCollection(builder, params, "filter_types", usedTokenFilters); + toXContentCollection(builder, params, "analyzer_types", usedAnalyzers); + toXContentCollection(builder, params, "built_in_char_filters", usedBuiltInCharFilters); + toXContentCollection(builder, params, "built_in_tokenizers", usedBuiltInTokenizers); + toXContentCollection(builder, params, "built_in_filters", usedBuiltInTokenFilters); + toXContentCollection(builder, params, "built_in_analyzers", usedBuiltInAnalyzers); + builder.endObject(); + return builder; + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java index 63ef4723a5c78..24e5de14e2a26 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java @@ -45,8 +45,12 @@ public class ClusterStatsIndices implements ToXContentFragment { private QueryCacheStats queryCache; private 
CompletionStats completion; private SegmentsStats segments; + private AnalysisStats analysis; + private MappingStats mappings; - public ClusterStatsIndices(List nodeResponses) { + public ClusterStatsIndices(List nodeResponses, + MappingStats mappingStats, + AnalysisStats analysisStats) { ObjectObjectHashMap countsPerIndex = new ObjectObjectHashMap<>(); this.docs = new DocsStats(); @@ -85,6 +89,9 @@ public ClusterStatsIndices(List nodeResponses) { for (ObjectObjectCursor indexCountsCursor : countsPerIndex) { shards.addIndexShardCount(indexCountsCursor.value); } + + this.mappings = mappingStats; + this.analysis = analysisStats; } public int getIndexCount() { @@ -119,6 +126,14 @@ public SegmentsStats getSegments() { return segments; } + public MappingStats getMappings() { + return mappings; + } + + public AnalysisStats getAnalysis() { + return analysis; + } + static final class Fields { static final String COUNT = "count"; } @@ -133,6 +148,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws queryCache.toXContent(builder, params); completion.toXContent(builder, params); segments.toXContent(builder, params); + if (mappings != null) { + mappings.toXContent(builder, params); + } + if (analysis != null) { + analysis.toXContent(builder, params); + } return builder; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java index c13da033aec7b..37addfab9bfa9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java @@ -19,9 +19,11 @@ package org.elasticsearch.action.admin.cluster.stats; +import org.elasticsearch.Version; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.nodes.BaseNodesResponse; import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -36,29 +38,45 @@ public class ClusterStatsResponse extends BaseNodesResponse implements ToXContentFragment { - ClusterStatsNodes nodesStats; - ClusterStatsIndices indicesStats; - ClusterHealthStatus status; - long timestamp; - String clusterUUID; + final ClusterStatsNodes nodesStats; + final ClusterStatsIndices indicesStats; + final ClusterHealthStatus status; + final long timestamp; + final String clusterUUID; public ClusterStatsResponse(StreamInput in) throws IOException { super(in); timestamp = in.readVLong(); // it may be that the master switched on us while doing the operation. In this case the status may be null. 
status = in.readOptionalWriteable(ClusterHealthStatus::readFrom); + + String clusterUUID = null; + MappingStats mappingStats = null; + AnalysisStats analysisStats = null; + if (in.getVersion().onOrAfter(Version.V_8_0_0)) { // TODO: 7_7_0 + clusterUUID = in.readOptionalString(); + mappingStats = in.readOptionalWriteable(MappingStats::new); + analysisStats = in.readOptionalWriteable(AnalysisStats::new); + } + this.clusterUUID = clusterUUID; + + // built from nodes rather than from the stream directly + nodesStats = new ClusterStatsNodes(getNodes()); + indicesStats = new ClusterStatsIndices(getNodes(), mappingStats, analysisStats); } public ClusterStatsResponse(long timestamp, String clusterUUID, ClusterName clusterName, List nodes, - List failures) { + List failures, + ClusterState state) { super(clusterName, nodes, failures); this.clusterUUID = clusterUUID; this.timestamp = timestamp; nodesStats = new ClusterStatsNodes(nodes); - indicesStats = new ClusterStatsIndices(nodes); + indicesStats = new ClusterStatsIndices(nodes, MappingStats.of(state), AnalysisStats.of(state)); + ClusterHealthStatus status = null; for (ClusterStatsNodeResponse response : nodes) { // only the master node populates the status if (response.clusterStatus() != null) { @@ -66,6 +84,7 @@ public ClusterStatsResponse(long timestamp, break; } } + this.status = status; } public String getClusterUUID() { @@ -93,17 +112,16 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeVLong(timestamp); out.writeOptionalWriteable(status); + if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + out.writeOptionalString(clusterUUID); + out.writeOptionalWriteable(indicesStats.getMappings()); + out.writeOptionalWriteable(indicesStats.getAnalysis()); + } } @Override protected List readNodesFrom(StreamInput in) throws IOException { - List nodes = in.readList(ClusterStatsNodeResponse::readNodeResponse); - - // built from nodes rather than from the stream directly - nodesStats = new ClusterStatsNodes(nodes); - indicesStats = new ClusterStatsIndices(nodes); - - return nodes; + return in.readList(ClusterStatsNodeResponse::readNodeResponse); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/IndexFeatureStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/IndexFeatureStats.java new file mode 100644 index 0000000000000..3a3461092eaef --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/IndexFeatureStats.java @@ -0,0 +1,102 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.cluster.stats; + +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * Statistics about an index feature. + */ +public final class IndexFeatureStats implements ToXContent, Writeable { + + final String name; + int count; + int indexCount; + + IndexFeatureStats(String name) { + this.name = Objects.requireNonNull(name); + } + + IndexFeatureStats(StreamInput in) throws IOException { + this.name = in.readString(); + this.count = in.readVInt(); + this.indexCount = in.readVInt(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(name); + out.writeVInt(count); + out.writeVInt(indexCount); + } + + /** + * Return the name of the field type. + */ + public String getName() { + return name; + } + + /** + * Return the number of times this feature is used across the cluster. + */ + public int getCount() { + return count; + } + + /** + * Return the number of indices that use this feature across the cluster. + */ + public int getIndexCount() { + return indexCount; + } + + @Override + public boolean equals(Object other) { + if (other instanceof IndexFeatureStats == false) { + return false; + } + IndexFeatureStats that = (IndexFeatureStats) other; + return name.equals(that.name) && count == that.count && indexCount == that.indexCount; + } + + @Override + public int hashCode() { + return Objects.hash(name, count, indexCount); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("name", name); + builder.field("count", count); + builder.field("index_count", indexCount); + builder.endObject(); + return builder; + } + +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java new file mode 100644 index 0000000000000..8892ca9dcf11e --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java @@ -0,0 +1,134 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.cluster.stats; + +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ToXContentFragment; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +/** + * Usage statistics about mappings usage. + */ +public final class MappingStats implements ToXContentFragment, Writeable { + + /** + * Create {@link MappingStats} from the given cluster state. + */ + public static MappingStats of(ClusterState state) { + Map fieldTypes = new HashMap<>(); + for (IndexMetaData indexMetaData : state.metaData()) { + Set indexFieldTypes = new HashSet<>(); + MappingMetaData mappingMetaData = indexMetaData.mapping(); + if (mappingMetaData != null) { + MappingVisitor.visitMapping(mappingMetaData.getSourceAsMap(), fieldMapping -> { + String type = null; + Object typeO = fieldMapping.get("type"); + if (typeO != null) { + type = typeO.toString(); + } else if (fieldMapping.containsKey("properties")) { + type = "object"; + } + + if (type != null) { + IndexFeatureStats stats = fieldTypes.computeIfAbsent(type, IndexFeatureStats::new); + stats.count++; + if (indexFieldTypes.add(type)) { + stats.indexCount++; + } + } + }); + } + } + return new MappingStats(fieldTypes.values()); + } + + private final Set fieldTypeStats; + + MappingStats(Collection fieldTypeStats) { + List stats = new ArrayList<>(fieldTypeStats); + stats.sort(Comparator.comparing(IndexFeatureStats::getName)); + this.fieldTypeStats = Collections.unmodifiableSet(new LinkedHashSet(stats)); + } + + MappingStats(StreamInput in) throws IOException { + fieldTypeStats = Collections.unmodifiableSet(new LinkedHashSet<>(in.readList(IndexFeatureStats::new))); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeCollection(fieldTypeStats); + } + + /** + * Return stats about field types. 
+ */ + public Set getFieldTypeStats() { + return fieldTypeStats; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject("mappings"); + builder.startArray("field_types"); + for (IndexFeatureStats st : fieldTypeStats) { + st.toXContent(builder, params); + } + builder.endArray(); + builder.endObject(); + return builder; + } + + @Override + public String toString() { + return Strings.toString(this, true, true); + } + + @Override + public boolean equals(Object o) { + if (o instanceof MappingStats == false) { + return false; + } + MappingStats that = (MappingStats) o; + return fieldTypeStats.equals(that.fieldTypeStats); + } + + @Override + public int hashCode() { + return fieldTypeStats.hashCode(); + } +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingVisitor.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingVisitor.java new file mode 100644 index 0000000000000..49e3a99c244b8 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingVisitor.java @@ -0,0 +1,60 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.cluster.stats; + +import java.util.Map; +import java.util.function.Consumer; + +final class MappingVisitor { + + private MappingVisitor() {} + + static void visitMapping(Map mapping, Consumer> fieldMappingConsumer) { + Object properties = mapping.get("properties"); + if (properties != null && properties instanceof Map) { + @SuppressWarnings("unchecked") + Map propertiesAsMap = (Map) properties; + for (Object v : propertiesAsMap.values()) { + if (v != null && v instanceof Map) { + + @SuppressWarnings("unchecked") + Map fieldMapping = (Map) v; + fieldMappingConsumer.accept(fieldMapping); + visitMapping(fieldMapping, fieldMappingConsumer); + + // Multi fields + Object fieldsO = fieldMapping.get("fields"); + if (fieldsO != null && fieldsO instanceof Map) { + @SuppressWarnings("unchecked") + Map fields = (Map) fieldsO; + for (Object v2 : fields.values()) { + if (v2 instanceof Map) { + @SuppressWarnings("unchecked") + Map fieldMapping2 = (Map) v2; + fieldMappingConsumer.accept(fieldMapping2); + } + } + } + } + } + } + } + +} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java index 4f23874a54fbd..a2c261336ef2e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -29,6 +29,7 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.nodes.BaseNodeRequest; import org.elasticsearch.action.support.nodes.TransportNodesAction; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.health.ClusterStateHealth; import org.elasticsearch.cluster.service.ClusterService; @@ -73,12 +74,14 @@ public TransportClusterStatsAction(ThreadPool threadPool, ClusterService cluster @Override protected ClusterStatsResponse newResponse(ClusterStatsRequest request, List responses, List failures) { + ClusterState state = clusterService.state(); return new ClusterStatsResponse( System.currentTimeMillis(), - clusterService.state().metaData().clusterUUID(), + state.metaData().clusterUUID(), clusterService.getClusterName(), responses, - failures); + failures, + state); } @Override diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStatsTests.java new file mode 100644 index 0000000000000..74c966eb58b73 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStatsTests.java @@ -0,0 +1,170 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.stats; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.io.IOException; +import java.util.HashSet; +import java.util.Set; + +public class AnalysisStatsTests extends AbstractWireSerializingTestCase { + + @Override + protected Reader instanceReader() { + return AnalysisStats::new; + } + + private static IndexFeatureStats randomStats(String name) { + IndexFeatureStats stats = new IndexFeatureStats(name); + stats.indexCount = randomIntBetween(1, 5); + stats.count = randomIntBetween(stats.indexCount, 10); + return stats; + } + + @Override + protected AnalysisStats createTestInstance() { + Set charFilters = new HashSet<>(); + if (randomBoolean()) { + charFilters.add(randomStats("pattern_replace")); + } + + Set tokenizers = new HashSet<>(); + if (randomBoolean()) { + tokenizers.add(randomStats("whitespace")); + } + + Set tokenFilters = new HashSet<>(); + if (randomBoolean()) { + tokenFilters.add(randomStats("stop")); + } + + Set analyzers = new HashSet<>(); + if (randomBoolean()) { + tokenFilters.add(randomStats("english")); + } + + Set builtInCharFilters = new HashSet<>(); + if (randomBoolean()) { + builtInCharFilters.add(randomStats("html_strip")); + } + + Set builtInTokenizers = new HashSet<>(); + if (randomBoolean()) { + builtInTokenizers.add(randomStats("keyword")); + } + + Set builtInTokenFilters = new HashSet<>(); + if (randomBoolean()) { + builtInTokenFilters.add(randomStats("trim")); + } + + Set builtInAnalyzers = new HashSet<>(); + if (randomBoolean()) { + builtInAnalyzers.add(randomStats("french")); + } + return new AnalysisStats(charFilters, tokenizers, tokenFilters, analyzers, + builtInCharFilters, builtInTokenizers, builtInTokenFilters, builtInAnalyzers); + } + + @Override + protected AnalysisStats mutateInstance(AnalysisStats instance) throws IOException { + switch (randomInt(7)) { + case 0: + Set charFilters = new HashSet<>(instance.getUsedCharFilterTypes()); + if (charFilters.removeIf(s -> s.getName().equals("pattern_replace")) == false) { + charFilters.add(randomStats("pattern_replace")); + } + return new AnalysisStats(charFilters, instance.getUsedTokenizerTypes(), + instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), instance.getUsedBuiltInCharFilters(), + instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(), + instance.getUsedBuiltInAnalyzers()); + case 1: + Set tokenizers = new HashSet<>(instance.getUsedTokenizerTypes()); + if (tokenizers.removeIf(s -> s.getName().equals("whitespace")) == false) { + tokenizers.add(randomStats("whitespace")); + } + return new AnalysisStats(instance.getUsedCharFilterTypes(), tokenizers, + instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), instance.getUsedBuiltInCharFilters(), + instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(), + instance.getUsedBuiltInAnalyzers()); + case 2: + Set tokenFilters = new HashSet<>(instance.getUsedTokenFilterTypes()); + if (tokenFilters.removeIf(s -> s.getName().equals("stop")) == false) { + tokenFilters.add(randomStats("stop")); + } + return new AnalysisStats(instance.getUsedCharFilterTypes(), + instance.getUsedTokenizerTypes(), + tokenFilters, instance.getUsedAnalyzerTypes(), instance.getUsedBuiltInCharFilters(), + instance.getUsedBuiltInTokenizers(), 
instance.getUsedBuiltInTokenFilters(), + instance.getUsedBuiltInAnalyzers()); + case 3: + Set analyzers = new HashSet<>(instance.getUsedAnalyzerTypes()); + if (analyzers.removeIf(s -> s.getName().equals("english")) == false) { + analyzers.add(randomStats("english")); + } + return new AnalysisStats(instance.getUsedCharFilterTypes(), + instance.getUsedTokenizerTypes(), instance.getUsedTokenFilterTypes(), analyzers, + instance.getUsedBuiltInCharFilters(), instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(), + instance.getUsedBuiltInAnalyzers()); + case 4: + Set builtInCharFilters = new HashSet<>(instance.getUsedBuiltInCharFilters()); + if (builtInCharFilters.removeIf(s -> s.getName().equals("html_strip")) == false) { + builtInCharFilters.add(randomStats("html_strip")); + } + return new AnalysisStats(instance.getUsedCharFilterTypes(), + instance.getUsedTokenizerTypes(), + instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), builtInCharFilters, + instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(), + instance.getUsedBuiltInAnalyzers()); + case 5: + Set builtInTokenizers = new HashSet<>(instance.getUsedBuiltInTokenizers()); + if (builtInTokenizers.removeIf(s -> s.getName().equals("keyword")) == false) { + builtInTokenizers.add(randomStats("keyword")); + } + return new AnalysisStats(instance.getUsedCharFilterTypes(), + instance.getUsedTokenizerTypes(), instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), + instance.getUsedBuiltInCharFilters(), builtInTokenizers, instance.getUsedBuiltInTokenFilters(), + instance.getUsedBuiltInAnalyzers()); + case 6: + Set builtInTokenFilters = new HashSet<>(instance.getUsedBuiltInTokenFilters()); + if (builtInTokenFilters.removeIf(s -> s.getName().equals("trim")) == false) { + builtInTokenFilters.add(randomStats("trim")); + } + return new AnalysisStats(instance.getUsedCharFilterTypes(), + instance.getUsedTokenizerTypes(), instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), + instance.getUsedBuiltInCharFilters(), instance.getUsedBuiltInTokenizers(), builtInTokenFilters, + instance.getUsedBuiltInAnalyzers()); + case 7: + Set builtInAnalyzers = new HashSet<>(instance.getUsedBuiltInAnalyzers()); + if (builtInAnalyzers.removeIf(s -> s.getName().equals("french")) == false) { + builtInAnalyzers.add(randomStats("french")); + } + return new AnalysisStats(instance.getUsedCharFilterTypes(), + instance.getUsedTokenizerTypes(), instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), + instance.getUsedBuiltInCharFilters(), instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(), + builtInAnalyzers); + default: + throw new AssertionError(); + } + + } +} diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/MappingStatsTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/MappingStatsTests.java new file mode 100644 index 0000000000000..21ba1725db2ac --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/MappingStatsTests.java @@ -0,0 +1,70 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.cluster.stats; + +import org.elasticsearch.common.io.stream.Writeable.Reader; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +public class MappingStatsTests extends AbstractWireSerializingTestCase { + + @Override + protected Reader instanceReader() { + return MappingStats::new; + } + + @Override + protected MappingStats createTestInstance() { + Collection stats = new ArrayList<>(); + if (randomBoolean()) { + IndexFeatureStats s = new IndexFeatureStats("keyword"); + s.count = 10; + s.indexCount = 7; + stats.add(s); + } + if (randomBoolean()) { + IndexFeatureStats s = new IndexFeatureStats("integer"); + s.count = 3; + s.indexCount = 3; + stats.add(s); + } + return new MappingStats(stats); + } + + @Override + protected MappingStats mutateInstance(MappingStats instance) throws IOException { + List fieldTypes = new ArrayList<>(instance.getFieldTypeStats()); + boolean remove = fieldTypes.size() > 0 && randomBoolean(); + if (remove) { + fieldTypes.remove(randomInt(fieldTypes.size() - 1)); + } + if (remove == false || randomBoolean()) { + IndexFeatureStats s = new IndexFeatureStats("float"); + s.count = 13; + s.indexCount = 2; + fieldTypes.add(s); + } + return new MappingStats(fieldTypes); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/oss/IndexUsageTransportActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/MappingVisitorTests.java similarity index 78% rename from x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/oss/IndexUsageTransportActionTests.java rename to server/src/test/java/org/elasticsearch/action/admin/cluster/stats/MappingVisitorTests.java index 605e8cc0e1534..3fdd75d510959 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/oss/IndexUsageTransportActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/MappingVisitorTests.java @@ -1,10 +1,23 @@ /* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
*/ -package org.elasticsearch.xpack.oss; +package org.elasticsearch.action.admin.cluster.stats; import org.elasticsearch.test.ESTestCase; @@ -15,10 +28,10 @@ import java.util.Map; import java.util.Set; -public class IndexUsageTransportActionTests extends ESTestCase { +public class MappingVisitorTests extends ESTestCase { private static void collectTypes(Map mapping, Set types) { - IndexUsageTransportAction.visitMapping(mapping, + MappingVisitor.visitMapping(mapping, m -> { if (m.containsKey("type")) { types.add(m.get("type").toString()); @@ -102,5 +115,4 @@ public void testCountInnerFields() { collectTypes(mapping, fields); assertEquals(new HashSet<>(Arrays.asList("keyword", "object")), fields); } - } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index 9722f8437ab1a..d5b312523d103 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -236,7 +236,6 @@ import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchAction; import org.elasticsearch.xpack.core.watcher.transport.actions.service.WatcherServiceAction; import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsAction; -import org.elasticsearch.xpack.oss.IndexFeatureSetUsage; import java.util.ArrayList; import java.util.Arrays; @@ -564,8 +563,7 @@ public List getNamedWriteables() { // Spatial new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.SPATIAL, SpatialFeatureSetUsage::new), // data science - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ANALYTICS, AnalyticsFeatureSetUsage::new), - new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.INDEX, IndexFeatureSetUsage::new) + new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ANALYTICS, AnalyticsFeatureSetUsage::new) ).stream(), MlEvaluationNamedXContentProvider.getNamedWriteables().stream() ).collect(toList()); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java index 3a836931b45a3..8a74272429f87 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackField.java @@ -53,8 +53,6 @@ public final class XPackField { public static final String ANALYTICS = "analytics"; /** Name constant for the enrich plugin. */ public static final String ENRICH = "enrich"; - /** Name constant for indices. 
*/ - public static final String INDEX = "index"; private XPackField() {} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java index 7a0307308b95a..9a2d5fd65eb6d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java @@ -59,7 +59,6 @@ import org.elasticsearch.xpack.core.action.TransportXPackUsageAction; import org.elasticsearch.xpack.core.action.XPackInfoAction; import org.elasticsearch.xpack.core.action.XPackUsageAction; -import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; import org.elasticsearch.xpack.core.action.XPackUsageResponse; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.rest.action.RestReloadAnalyzersAction; @@ -69,7 +68,6 @@ import org.elasticsearch.xpack.core.ssl.SSLConfigurationReloader; import org.elasticsearch.xpack.core.ssl.SSLService; import org.elasticsearch.xpack.core.watcher.WatcherMetaData; -import org.elasticsearch.xpack.oss.IndexUsageTransportAction; import java.nio.file.Files; import java.nio.file.Path; @@ -257,7 +255,6 @@ public Collection createComponents(Client client, ClusterService cluster actions.add(new ActionHandler<>(XPackUsageAction.INSTANCE, getUsageAction())); actions.addAll(licensing.getActions()); actions.add(new ActionHandler<>(ReloadAnalyzerAction.INSTANCE, TransportReloadAnalyzersAction.class)); - actions.add(new ActionHandler<>(XPackUsageFeatureAction.INDEX, IndexUsageTransportAction.class)); return actions; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureAction.java index 15e18ef38a4f9..fe43f9661488a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageFeatureAction.java @@ -40,11 +40,10 @@ public class XPackUsageFeatureAction extends ActionType ALL = Arrays.asList( SECURITY, MONITORING, WATCHER, GRAPH, MACHINE_LEARNING, LOGSTASH, SQL, ROLLUP, INDEX_LIFECYCLE, SNAPSHOT_LIFECYCLE, CCR, - TRANSFORM, FLATTENED, VECTORS, VOTING_ONLY, FROZEN_INDICES, SPATIAL, ANALYTICS, INDEX + TRANSFORM, FLATTENED, VECTORS, VOTING_ONLY, FROZEN_INDICES, SPATIAL, ANALYTICS ); private XPackUsageFeatureAction(String name) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/oss/IndexFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/oss/IndexFeatureSetUsage.java deleted file mode 100644 index d086d649f4ae0..0000000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/oss/IndexFeatureSetUsage.java +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -package org.elasticsearch.xpack.oss; - -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.xpack.core.XPackFeatureSet; -import org.elasticsearch.xpack.core.XPackField; - -import java.io.IOException; -import java.util.Collections; -import java.util.Objects; -import java.util.Set; -import java.util.TreeSet; - -public class IndexFeatureSetUsage extends XPackFeatureSet.Usage { - - private static Set sort(Set set) { - return Collections.unmodifiableSet(new TreeSet<>(set)); - } - - private final Set usedFieldTypes; - private final Set usedCharFilters, usedTokenizers, usedTokenFilters, usedAnalyzers; - private final Set usedBuiltInCharFilters, usedBuiltInTokenizers, usedBuiltInTokenFilters, usedBuiltInAnalyzers; - - public IndexFeatureSetUsage(Set usedFieldTypes, - Set usedCharFilters, Set usedTokenizers, Set usedTokenFilters, Set usedAnalyzers, - Set usedBuiltInCharFilters, Set usedBuiltInTokenizers, Set usedBuiltInTokenFilters, - Set usedBuiltInAnalyzers) { - super(XPackField.INDEX, true, true); - this.usedFieldTypes = sort(usedFieldTypes); - this.usedCharFilters = sort(usedCharFilters); - this.usedTokenizers = sort(usedTokenizers); - this.usedTokenFilters = sort(usedTokenFilters); - this.usedAnalyzers = sort(usedAnalyzers); - this.usedBuiltInCharFilters = sort(usedBuiltInCharFilters); - this.usedBuiltInTokenizers = sort(usedBuiltInTokenizers); - this.usedBuiltInTokenFilters = sort(usedBuiltInTokenFilters); - this.usedBuiltInAnalyzers = sort(usedBuiltInAnalyzers); - } - - public IndexFeatureSetUsage(StreamInput input) throws IOException { - super(input); - usedFieldTypes = input.readSet(StreamInput::readString); - usedCharFilters = input.readSet(StreamInput::readString); - usedTokenizers = input.readSet(StreamInput::readString); - usedTokenFilters = input.readSet(StreamInput::readString); - usedAnalyzers = input.readSet(StreamInput::readString); - usedBuiltInCharFilters = input.readSet(StreamInput::readString); - usedBuiltInTokenizers = input.readSet(StreamInput::readString); - usedBuiltInTokenFilters = input.readSet(StreamInput::readString); - usedBuiltInAnalyzers = input.readSet(StreamInput::readString); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeCollection(usedFieldTypes, StreamOutput::writeString); - out.writeCollection(usedCharFilters, StreamOutput::writeString); - out.writeCollection(usedTokenizers, StreamOutput::writeString); - out.writeCollection(usedTokenFilters, StreamOutput::writeString); - out.writeCollection(usedAnalyzers, StreamOutput::writeString); - out.writeCollection(usedBuiltInCharFilters, StreamOutput::writeString); - out.writeCollection(usedBuiltInTokenizers, StreamOutput::writeString); - out.writeCollection(usedBuiltInTokenFilters, StreamOutput::writeString); - out.writeCollection(usedBuiltInAnalyzers, StreamOutput::writeString); - } - - /** - * Return the set of used field types in the cluster. - */ - public Set getUsedFieldTypes() { - return usedFieldTypes; - } - - /** - * Return the set of used char filters in the cluster. - */ - public Set getUsedCharFilterTypes() { - return usedCharFilters; - } - - /** - * Return the set of used tokenizers in the cluster. - */ - public Set getUsedTokenizerTypes() { - return usedTokenizers; - } - - /** - * Return the set of used token filters in the cluster. 
- */ - public Set getUsedTokenFilterTypes() { - return usedTokenFilters; - } - - /** - * Return the set of used analyzers in the cluster. - */ - public Set getUsedAnalyzerTypes() { - return usedAnalyzers; - } - - /** - * Return the set of used built-in char filters in the cluster. - */ - public Set getUsedBuiltInCharFilters() { - return usedBuiltInCharFilters; - } - - /** - * Return the set of used built-in tokenizers in the cluster. - */ - public Set getUsedBuiltInTokenizers() { - return usedBuiltInTokenizers; - } - - /** - * Return the set of used built-in token filters in the cluster. - */ - public Set getUsedBuiltInTokenFilters() { - return usedBuiltInTokenFilters; - } - - /** - * Return the set of used built-in analyzers in the cluster. - */ - public Set getUsedBuiltInAnalyzers() { - return usedBuiltInAnalyzers; - } - - @Override - protected void innerXContent(XContentBuilder builder, Params params) throws IOException { - super.innerXContent(builder, params); - - builder.startObject("analysis"); - { - builder.field("char_filter_types", usedCharFilters); - builder.field("tokenizer_types", usedTokenizers); - builder.field("filter_types", usedTokenFilters); - builder.field("analyzer_types", usedAnalyzers); - - builder.field("built_in_char_filters", usedBuiltInCharFilters); - builder.field("built_in_tokenizers", usedBuiltInTokenizers); - builder.field("built_in_filters", usedBuiltInTokenFilters); - builder.field("built_in_analyzers", usedBuiltInAnalyzers); - } - builder.endObject(); - - builder.startObject("mappings"); - { - builder.field("field_types", usedFieldTypes); - } - builder.endObject(); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - IndexFeatureSetUsage that = (IndexFeatureSetUsage) o; - return available == that.available && enabled == that.enabled && - Objects.equals(usedFieldTypes, that.usedFieldTypes) && - Objects.equals(usedCharFilters, that.usedCharFilters) && - Objects.equals(usedTokenizers, that.usedTokenizers) && - Objects.equals(usedTokenFilters, that.usedTokenFilters) && - Objects.equals(usedAnalyzers, that.usedAnalyzers) && - Objects.equals(usedBuiltInCharFilters, that.usedBuiltInCharFilters) && - Objects.equals(usedBuiltInTokenizers, that.usedBuiltInTokenizers) && - Objects.equals(usedBuiltInTokenFilters, that.usedBuiltInTokenFilters) && - Objects.equals(usedBuiltInAnalyzers, that.usedBuiltInAnalyzers); - } - - @Override - public int hashCode() { - return Objects.hash(available, enabled, usedFieldTypes, usedCharFilters, usedTokenizers, usedTokenFilters, - usedAnalyzers, usedBuiltInCharFilters, usedBuiltInTokenizers, usedBuiltInTokenFilters, - usedBuiltInAnalyzers); - } - - @Override - public String toString() { - return Strings.toString(this, true, true); - } -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/oss/IndexUsageTransportAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/oss/IndexUsageTransportAction.java deleted file mode 100644 index dd0e002c93e84..0000000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/oss/IndexUsageTransportAction.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -package org.elasticsearch.xpack.oss; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.MappingMetaData; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.protocol.xpack.XPackUsageRequest; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; -import org.elasticsearch.xpack.core.action.XPackUsageFeatureAction; -import org.elasticsearch.xpack.core.action.XPackUsageFeatureResponse; -import org.elasticsearch.xpack.core.action.XPackUsageFeatureTransportAction; - -import java.util.Collection; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; -import java.util.function.Consumer; - -public class IndexUsageTransportAction extends XPackUsageFeatureTransportAction { - - @Inject - public IndexUsageTransportAction(TransportService transportService, ClusterService clusterService, ThreadPool threadPool, - ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(XPackUsageFeatureAction.INDEX.name(), transportService, clusterService, threadPool, actionFilters, - indexNameExpressionResolver); - } - - @Override - protected void masterOperation(Task task, XPackUsageRequest request, ClusterState state, - ActionListener listener) { - - final Set usedFieldTypes = new HashSet<>(); - final Set usedCharFilters = new HashSet<>(); - final Set usedTokenizers = new HashSet<>(); - final Set usedTokenFilters = new HashSet<>(); - final Set usedAnalyzers = new HashSet<>(); - final Set usedBuiltInCharFilters = new HashSet<>(); - final Set usedBuiltInTokenizers = new HashSet<>(); - final Set usedBuiltInTokenFilters = new HashSet<>(); - final Set usedBuiltInAnalyzers = new HashSet<>(); - - for (IndexMetaData indexMetaData : state.metaData()) { - MappingMetaData mappingMetaData = indexMetaData.mapping(); - if (mappingMetaData != null) { - visitMapping(mappingMetaData.getSourceAsMap(), fieldMapping -> { - Object type = fieldMapping.get("type"); - if (type != null) { - usedFieldTypes.add(type.toString()); - } else if (fieldMapping.containsKey("properties")) { - usedFieldTypes.add("object"); - } - - for (String key : new String[] { "analyzer", "search_analyzer", "search_quote_analyzer" }) { - Object analyzer = fieldMapping.get(key); - if (analyzer != null) { - usedBuiltInAnalyzers.add(analyzer.toString()); - } - } - }); - } - - Settings indexSettings = indexMetaData.getSettings(); - - Map analyzerSettings = indexSettings.getGroups("index.analysis.analyzer"); - usedBuiltInAnalyzers.removeAll(analyzerSettings.keySet()); - for (Settings analyzerSetting : analyzerSettings.values()) { - usedAnalyzers.add(analyzerSetting.get("type", "custom")); - usedBuiltInCharFilters.addAll(analyzerSetting.getAsList("char_filter")); - String tokenizer = analyzerSetting.get("tokenizer"); - if (tokenizer != null) { - usedBuiltInTokenizers.add(tokenizer); - } - usedBuiltInTokenFilters.addAll(analyzerSetting.getAsList("filter")); - } - - Map charFilterSettings = indexSettings.getGroups("index.analysis.char_filter"); - usedBuiltInCharFilters.removeAll(charFilterSettings.keySet()); - 
aggregateAnalysisTypes(charFilterSettings.values(), usedCharFilters); - - Map tokenizerSettings = indexSettings.getGroups("index.analysis.tokenizer"); - usedBuiltInTokenizers.removeAll(tokenizerSettings.keySet()); - aggregateAnalysisTypes(tokenizerSettings.values(), usedTokenizers); - - Map tokenFilterSettings = indexSettings.getGroups("index.analysis.filter"); - usedBuiltInTokenFilters.removeAll(tokenFilterSettings.keySet()); - aggregateAnalysisTypes(tokenFilterSettings.values(), usedTokenFilters); - } - - listener.onResponse(new XPackUsageFeatureResponse( - new IndexFeatureSetUsage(usedFieldTypes, - usedCharFilters, usedTokenizers, usedTokenFilters, usedAnalyzers, - usedBuiltInCharFilters, usedBuiltInTokenizers, usedBuiltInTokenFilters, usedBuiltInAnalyzers))); - } - - static void visitMapping(Map mapping, Consumer> fieldMappingConsumer) { - Object properties = mapping.get("properties"); - if (properties != null && properties instanceof Map) { - @SuppressWarnings("unchecked") - Map propertiesAsMap = (Map) properties; - for (Object v : propertiesAsMap.values()) { - if (v != null && v instanceof Map) { - - @SuppressWarnings("unchecked") - Map fieldMapping = (Map) v; - fieldMappingConsumer.accept(fieldMapping); - visitMapping(fieldMapping, fieldMappingConsumer); - - // Multi fields - Object fieldsO = fieldMapping.get("fields"); - if (fieldsO != null && fieldsO instanceof Map) { - @SuppressWarnings("unchecked") - Map fields = (Map) fieldsO; - for (Object v2 : fields.values()) { - if (v2 instanceof Map) { - @SuppressWarnings("unchecked") - Map fieldMapping2 = (Map) v2; - fieldMappingConsumer.accept(fieldMapping2); - } - } - } - } - } - } - } - - static void aggregateAnalysisTypes(Collection analysisComponents, Set usedTypes) { - for (Settings settings : analysisComponents) { - String type = settings.get("type"); - if (type != null) { - usedTypes.add(type); - } - } - } -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/oss/package-info.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/oss/package-info.java deleted file mode 100644 index 56582e0746737..0000000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/oss/package-info.java +++ /dev/null @@ -1,10 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ - -/** - * Package containing usage information for features that are exposed in OSS. - */ -package org.elasticsearch.xpack.oss; \ No newline at end of file diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/oss/IndexFeatureSetUsageTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/oss/IndexFeatureSetUsageTests.java deleted file mode 100644 index 5d3b35b67ecbc..0000000000000 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/oss/IndexFeatureSetUsageTests.java +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ - -package org.elasticsearch.xpack.oss; - -import org.elasticsearch.common.io.stream.Writeable.Reader; -import org.elasticsearch.test.AbstractWireSerializingTestCase; - -import java.io.IOException; -import java.util.HashSet; -import java.util.Set; - -public class IndexFeatureSetUsageTests extends AbstractWireSerializingTestCase { - - @Override - protected Reader instanceReader() { - return IndexFeatureSetUsage::new; - } - - @Override - protected IndexFeatureSetUsage createTestInstance() { - Set fields = new HashSet<>(); - if (randomBoolean()) { - fields.add("keyword"); - } - if (randomBoolean()) { - fields.add("integer"); - } - - Set charFilters = new HashSet<>(); - if (randomBoolean()) { - charFilters.add("pattern_replace"); - } - - Set tokenizers = new HashSet<>(); - if (randomBoolean()) { - tokenizers.add("whitespace"); - } - - Set tokenFilters = new HashSet<>(); - if (randomBoolean()) { - tokenFilters.add("stop"); - } - - Set analyzers = new HashSet<>(); - if (randomBoolean()) { - tokenFilters.add("english"); - } - - Set builtInCharFilters = new HashSet<>(); - if (randomBoolean()) { - builtInCharFilters.add("html_strip"); - } - - Set builtInTokenizers = new HashSet<>(); - if (randomBoolean()) { - builtInTokenizers.add("keyword"); - } - - Set builtInTokenFilters = new HashSet<>(); - if (randomBoolean()) { - builtInTokenFilters.add("trim"); - } - - Set builtInAnalyzers = new HashSet<>(); - if (randomBoolean()) { - builtInAnalyzers.add("french"); - } - - return new IndexFeatureSetUsage(fields, - charFilters, tokenizers, tokenFilters, analyzers, - builtInCharFilters, builtInTokenizers, builtInTokenFilters, builtInAnalyzers); - } - - @Override - protected IndexFeatureSetUsage mutateInstance(IndexFeatureSetUsage instance) throws IOException { - switch (randomInt(8)) { - case 0: - Set fields = new HashSet<>(instance.getUsedFieldTypes()); - if (fields.add("keyword") == false) { - fields.remove("keyword"); - } - return new IndexFeatureSetUsage(fields, instance.getUsedCharFilterTypes(), instance.getUsedTokenizerTypes(), - instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), instance.getUsedBuiltInCharFilters(), - instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(), - instance.getUsedBuiltInAnalyzers()); - case 1: - Set charFilters = new HashSet<>(instance.getUsedCharFilterTypes()); - if (charFilters.add("pattern_replace") == false) { - charFilters.remove("pattern_replace"); - } - return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), charFilters, instance.getUsedTokenizerTypes(), - instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), instance.getUsedBuiltInCharFilters(), - instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(), - instance.getUsedBuiltInAnalyzers()); - case 2: - Set tokenizers = new HashSet<>(instance.getUsedTokenizerTypes()); - if (tokenizers.add("whitespace") == false) { - tokenizers.remove("whitespace"); - } - return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(), tokenizers, - instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), instance.getUsedBuiltInCharFilters(), - instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(), - instance.getUsedBuiltInAnalyzers()); - case 3: - Set tokenFilters = new HashSet<>(instance.getUsedTokenFilterTypes()); - if (tokenFilters.add("stop") == false) { - tokenFilters.remove("stop"); - } - return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), 
instance.getUsedCharFilterTypes(), - instance.getUsedTokenizerTypes(), - tokenFilters, instance.getUsedAnalyzerTypes(), instance.getUsedBuiltInCharFilters(), - instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(), - instance.getUsedBuiltInAnalyzers()); - case 4: - Set analyzers = new HashSet<>(instance.getUsedAnalyzerTypes()); - if (analyzers.add("english") == false) { - analyzers.remove("english"); - } - return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(), - instance.getUsedTokenizerTypes(), instance.getUsedTokenFilterTypes(), analyzers, - instance.getUsedBuiltInCharFilters(), instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(), - instance.getUsedBuiltInAnalyzers()); - case 5: - Set builtInCharFilters = new HashSet<>(instance.getUsedBuiltInCharFilters()); - if (builtInCharFilters.add("html_strip") == false) { - builtInCharFilters.remove("html_strip"); - } - return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(), - instance.getUsedTokenizerTypes(), - instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), builtInCharFilters, - instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(), - instance.getUsedBuiltInAnalyzers()); - case 6: - Set builtInTokenizers = new HashSet<>(instance.getUsedBuiltInTokenizers()); - if (builtInTokenizers.add("keyword") == false) { - builtInTokenizers.remove("keyword"); - } - return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(), - instance.getUsedTokenizerTypes(), instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), - instance.getUsedBuiltInCharFilters(), builtInTokenizers, instance.getUsedBuiltInTokenFilters(), - instance.getUsedBuiltInAnalyzers()); - case 7: - Set builtInTokenFilters = new HashSet<>(instance.getUsedBuiltInTokenFilters()); - if (builtInTokenFilters.add("trim") == false) { - builtInTokenFilters.remove("trim"); - } - return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(), - instance.getUsedTokenizerTypes(), instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), - instance.getUsedBuiltInCharFilters(), instance.getUsedBuiltInTokenizers(), builtInTokenFilters, - instance.getUsedBuiltInAnalyzers()); - case 8: - Set builtInAnalyzers = new HashSet<>(instance.getUsedBuiltInAnalyzers()); - if (builtInAnalyzers.add("french") == false) { - builtInAnalyzers.remove("french"); - } - return new IndexFeatureSetUsage(instance.getUsedFieldTypes(), instance.getUsedCharFilterTypes(), - instance.getUsedTokenizerTypes(), instance.getUsedTokenFilterTypes(), instance.getUsedAnalyzerTypes(), - instance.getUsedBuiltInCharFilters(), instance.getUsedBuiltInTokenizers(), instance.getUsedBuiltInTokenFilters(), - builtInAnalyzers); - default: - throw new AssertionError(); - } - } -} diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java index f9549034266f3..656996212208d 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java @@ -326,7 +326,8 @@ public void testToXContent() throws 
IOException { "_cluster", clusterName, singletonList(mockNodeResponse), - emptyList()); + emptyList(), + clusterState); final MonitoringDoc.Node node = new MonitoringDoc.Node("_uuid", "_host", "_addr", "_ip", "_name", 1504169190855L); @@ -441,6 +442,19 @@ public void testToXContent() throws IOException { + " \"fixed_bit_set_memory_in_bytes\": 0," + " \"max_unsafe_auto_id_timestamp\": -9223372036854775808," + " \"file_sizes\": {}" + + " }," + + " \"mappings\":{" + + " \"field_types\":[]" + + " }," + + " \"analysis\":{" + + " \"char_filter_types\":[]," + + " \"tokenizer_types\":[]," + + " \"filter_types\":[]," + + " \"analyzer_types\":[]," + + " \"built_in_char_filters\":[]," + + " \"built_in_tokenizers\":[]," + + " \"built_in_filters\":[]," + + " \"built_in_analyzers\":[]" + " }" + " }," + " \"nodes\": {" diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/15_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/15_basic.yml deleted file mode 100644 index cb54a464aef2f..0000000000000 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/xpack/15_basic.yml +++ /dev/null @@ -1,262 +0,0 @@ -# Integration tests xpack info and usage API -# -"X-Pack Info and Usage": - - - do: - cluster.health: - wait_for_status: yellow - - - do: - license.delete: {} - - match: { acknowledged: true } - - # we don't have a license now - - do: - xpack.info: - categories: "license,features" - - is_false: license - - is_true: features - - is_true: features.watcher - - is_true: features.watcher.enabled -# - is_false: features.watcher.available TODO fix once licensing is fixed - - is_true: features.security - - is_true: features.security.enabled -# - is_false: features.security.available TODO fix once licensing is fixed - - is_true: features.graph - - is_true: features.graph.enabled -# - is_false: features.graph.available TODO fix once licensing is fixed - - is_true: features.monitoring - - is_true: features.monitoring.enabled -# - is_false: features.monitoring.available TODO fix once licensing is fixed - - is_true: features.analytics - - is_true: features.analytics.enabled - - - do: - license.post: - body: > - { - "license": { - "uid": "893361dc-9749-4997-93cb-802e3dofh7aa", - "type": "internal", - "subscription_type": "none", - "issue_date_in_millis": 1443484800000, - "feature": "watcher", - "expiry_date_in_millis": 1914278399999, - "max_nodes": 1, - "issued_to": "issuedTo", - "issuer": "issuer", - "signature": "AAAAAQAAAA0Sc90guRIaQEmgLvMnAAABmC9ZN0hjZDBGYnVyRXpCOW5Bb3FjZDAxOWpSbTVoMVZwUzRxVk1PSmkxakxZdW5IMlhlTHNoN1N2MXMvRFk4d3JTZEx3R3RRZ0pzU3lobWJKZnQvSEFva0ppTHBkWkprZWZSQi9iNmRQNkw1SlpLN0lDalZCS095MXRGN1lIZlpYcVVTTnFrcTE2dzhJZmZrdFQrN3JQeGwxb0U0MXZ0dDJHSERiZTVLOHNzSDByWnpoZEphZHBEZjUrTVBxRENNSXNsWWJjZllaODdzVmEzUjNiWktNWGM5TUhQV2plaUo4Q1JOUml4MXNuL0pSOEhQaVB2azhmUk9QVzhFeTFoM1Q0RnJXSG53MWk2K055c28zSmRnVkF1b2JSQkFLV2VXUmVHNDZ2R3o2VE1qbVNQS2lxOHN5bUErZlNIWkZSVmZIWEtaSU9wTTJENDVvT1NCYklacUYyK2FwRW9xa0t6dldMbmMzSGtQc3FWOTgzZ3ZUcXMvQkt2RUZwMFJnZzlvL2d2bDRWUzh6UG5pdENGWFRreXNKNkE9PQAAAQCQ94dju0pnDZR3Uuypi0ic3aQJ+nvVqe+U8u79Dga5n1qIjcHDh7HvIBJEkF+tnVPlo/PXV/x7BZSwVY1PVErit+6rYix1yuHEgqwxmx/VdRICjCaZM6tk0Ob4dZCPv6Ebn2Mmk89KHC/PwiLPqF6QfwV/Pkpa8k2A3ORJmvYSDvXhe6tCs8dqc4ebrsFxqrZjwWh5CZSpzqqZBFXlngDv2N0hHhpGlueRszD0JJ5dfEL5ZA1DDOrgO9OJVejSHyRqe1L5QRUNdXPVfS+EAG0Dd1cNdJ/sMpYCPnVjbw6iq2/YgM3cuztsXVBY7ij4WnoP3ce7Zjs9TwHn+IqzftC6" - } - } - - match: { license_status: "valid" } - - - do: - license.get: {} - - match: { license.uid: "893361dc-9749-4997-93cb-802e3dofh7aa" } - - match: { 
license.type: "internal" } - - match: { license.status: "active" } - - - do: - xpack.info: {} - - is_true: build.hash - - is_true: build.date - - is_true: license - - match: { license.uid: "893361dc-9749-4997-93cb-802e3dofh7aa" } - - match: { license.type: "internal" } - - match: { license.mode: "trial" } - - match: { license.status: "active" } - - match: { license.expiry_date_in_millis: 1914278399999 } - - is_true: features - - is_true: features.watcher - - is_true: features.watcher.enabled - - is_true: features.watcher.available - - is_true: features.security - - is_true: features.security.enabled - - is_true: features.security.available - - is_true: features.graph - - is_true: features.graph.enabled - - is_true: features.graph.available - - is_true: features.monitoring - - is_true: features.monitoring.enabled - - is_true: features.monitoring.available - - is_true: features.analytics.enabled - - is_true: features.analytics.available - - is_true: features.enrich.available - - is_true: features.enrich.enabled - - is_true: tagline - - - do: - xpack.usage: {} - - is_true: watcher.enabled - - is_true: watcher.available - - is_true: security.enabled - - is_true: security.available - - is_true: graph.enabled - - is_true: graph.available - - is_true: monitoring.enabled - - is_true: monitoring.available - - is_true: analytics.available - - - do: - xpack.info: - categories: "_none" - - is_false: build - - is_false: features - - is_false: license - - match: { tagline: "You know, for X" } - - - do: - xpack.info: - categories: "_none" - human: false - - is_false: build - - is_false: features - - is_false: license - - is_false: tagline - - - do: - xpack.info: - categories: "build" - - is_true: build - - is_true: build.hash - - is_true: build.date - - is_true: tagline - - is_false: features - - is_false: license - - - do: - xpack.info: - categories: "build,license" - - is_true: build.hash - - is_true: build.date - - is_true: tagline - - is_false: features - - is_true: license - - match: { license.uid: "893361dc-9749-4997-93cb-802e3dofh7aa" } - - match: { license.type: "internal" } - - match: { license.mode: "trial" } - - match: { license.status: "active" } - - match: { license.expiry_date_in_millis: 1914278399999 } - - - - do: - xpack.info: - categories: "build,license,features" - human: false - - is_true: build.hash - - is_true: build.date - - is_true: license - - match: { license.uid: "893361dc-9749-4997-93cb-802e3dofh7aa" } - - match: { license.type: "internal" } - - match: { license.mode: "trial" } - - match: { license.status: "active" } - - match: { license.expiry_date_in_millis: 1914278399999 } - - is_true: features - - is_true: features.watcher - - is_true: features.watcher.enabled - - is_true: features.watcher.available - - is_true: features.security - - is_true: features.security.enabled - - is_true: features.security.available - - is_true: features.graph - - is_true: features.graph.enabled - - is_true: features.graph.available - - is_true: features.monitoring - - is_true: features.monitoring.enabled - - is_true: features.monitoring.available - - is_false: tagline - ---- -"Usage stats for mappings": - - - skip: - version: "all" - reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/51127" - - - do: - xpack.usage: {} - - - match: { index.mappings.field_types: [] } - - - do: - indices.create: - index: test-index1 - body: - mappings: - properties: - foo: - type: keyword - - - do: - indices.create: - index: test-index2 - body: - mappings: - properties: - foo: - type: keyword - bar: - 
properties: - quux: - type: integer - - - do: - xpack.usage: {} - - - match: { index.mappings.field_types: [ "integer", "keyword", "object" ] } - ---- -"Usage stats for analysis": - - do: - xpack.usage: {} - - - match: { index.analysis.char_filter_types: [] } - - match: { index.analysis.tokenizer_types: [] } - - match: { index.analysis.filter_types: [] } - - match: { index.analysis.analyzer_types: [] } - - - do: - indices.create: - index: test-index1 - body: - settings: - analysis: - char_filter: - c: - type: mapping - mappings: [ "a => b" ] - tokenizer: - tok: - type: pattern - pattern: "," - filter: - st: - type: stop - stopwords: [ "a" ] - analyzer: - en: - type: standard - stopwords: "_english_" - cust: - char_filter: [ "html_strip" ] - tokenizer: "keyword" - filter: [ "trim" ] - mappings: - properties: - message: - type: "text" - analyzer: french - search_analyzer: spanish - search_quote_analyzer: german - - - do: - xpack.usage: {} - - - match: { index.analysis.char_filter_types: [ "mapping" ] } - - match: { index.analysis.tokenizer_types: [ "pattern" ] } - - match: { index.analysis.filter_types: [ "stop" ] } - - match: { index.analysis.analyzer_types: [ "custom", "standard" ] } - - match: { index.analysis.built_in_char_filters: [ "html_strip" ] } - - match: { index.analysis.built_in_tokenizers: [ "keyword" ] } - - match: { index.analysis.built_in_filters: [ "trim" ] } - - match: { index.analysis.built_in_analyzers: [ "french", "german", "spanish" ] }
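
For context on what the deleted IndexUsageTransportAction computed, and what the cluster-stats mappings section now reports instead per the docs and YAML tests earlier in this change, the following is a minimal, self-contained sketch of the mapping walk. The class and method names (FieldTypeTally, visit) are illustrative only and are not the actual Elasticsearch classes; the removed code additionally descends into multi-fields under "fields" and tracks per-index counts, which this sketch omits for brevity.

    import java.util.HashMap;
    import java.util.Map;

    // Illustrative sketch: recursively walk a parsed mapping and tally field types,
    // counting entries that only declare "properties" as "object" fields, which is
    // the same rule the removed visitMapping logic applied.
    public class FieldTypeTally {

        @SuppressWarnings("unchecked")
        static void visit(Map<String, Object> mapping, Map<String, Integer> counts) {
            Object properties = mapping.get("properties");
            if (properties instanceof Map) {
                for (Object field : ((Map<String, Object>) properties).values()) {
                    if (field instanceof Map) {
                        Map<String, Object> fieldMapping = (Map<String, Object>) field;
                        Object type = fieldMapping.get("type");
                        if (type != null) {
                            counts.merge(type.toString(), 1, Integer::sum);
                        } else if (fieldMapping.containsKey("properties")) {
                            counts.merge("object", 1, Integer::sum);
                        }
                        visit(fieldMapping, counts); // recurse into sub-objects
                    }
                }
            }
        }

        public static void main(String[] args) {
            // Mapping shaped like test-index2 from the YAML test above:
            // foo: keyword, bar: { quux: integer, baz: keyword }
            Map<String, Object> quux = Map.<String, Object>of("type", "integer");
            Map<String, Object> baz  = Map.<String, Object>of("type", "keyword");
            Map<String, Object> bar  = Map.<String, Object>of("properties", Map.of("quux", quux, "baz", baz));
            Map<String, Object> foo  = Map.<String, Object>of("type", "keyword");
            Map<String, Object> mapping = Map.<String, Object>of("properties", Map.of("foo", foo, "bar", bar));

            Map<String, Integer> counts = new HashMap<>();
            visit(mapping, counts);
            System.out.println(counts); // e.g. {keyword=2, integer=1, object=1}
        }
    }

The analysis bookkeeping in the removed transport action follows the same subtraction pattern that the new stats preserve: collect analyzer, tokenizer, filter, and char_filter names referenced from mappings and custom analyzer definitions, then remove any names defined under index.analysis.* so that only genuine built-in components remain in the built_in_* buckets, while the custom definitions contribute their "type" (defaulting to "custom" for analyzers) to the *_types buckets.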