Adding data_lifecycle to the _xpack/usage API #96177

Merged
5 changes: 5 additions & 0 deletions docs/changelog/96177.yaml
@@ -0,0 +1,5 @@
pr: 96177
summary: Adding `data_lifecycle` to the _xpack/usage API
area: DLM
type: enhancement
issues: []
13 changes: 13 additions & 0 deletions docs/reference/rest-api/usage.asciidoc
@@ -328,6 +328,19 @@ GET /_xpack/usage
"data_streams" : 0,
"indices_count" : 0
},
"data_lifecycle" : {
"available": true,
"enabled": true,
"lifecycle": {
"count": 0,
"default_rollover_used": true,
"retention": {
"minimum_millis": 0,
"maximum_millis": 0,
"average_millis": 0.0
}
}
},
"data_tiers" : {
"available" : true,
"enabled" : true,
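The new `data_lifecycle` section reports whether the feature is available and enabled, how many data streams have a lifecycle configured, whether the cluster-default rollover configuration is in use, and the minimum/maximum/average retention in milliseconds. The aggregation itself is done by DataLifecycleUsageTransportAction, which is not part of this excerpt; the sketch below only illustrates how such retention statistics could be computed, and its class and method names are hypothetical.

import java.util.List;
import java.util.LongSummaryStatistics;

// Illustrative sketch only; not the real DataLifecycleUsageTransportAction.
final class RetentionStatsSketch {

    record RetentionStats(long count, long minimumMillis, long maximumMillis, double averageMillis) {}

    // Aggregates the retention values (in millis) of all data streams that have a lifecycle configured.
    static RetentionStats compute(List<Long> retentionMillisPerLifecycle) {
        if (retentionMillisPerLifecycle.isEmpty()) {
            // Matches the documented defaults when no lifecycles exist.
            return new RetentionStats(0, 0, 0, 0.0);
        }
        LongSummaryStatistics stats = retentionMillisPerLifecycle.stream().mapToLong(Long::longValue).summaryStatistics();
        return new RetentionStats(stats.getCount(), stats.getMin(), stats.getMax(), stats.getAverage());
    }
}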
3 changes: 2 additions & 1 deletion server/src/main/java/org/elasticsearch/TransportVersion.java
@@ -126,12 +126,13 @@ private static TransportVersion registerTransportVersion(int id, String uniqueId
public static final TransportVersion V_8_500_003 = registerTransportVersion(8_500_003, "30adbe0c-8614-40dd-81b5-44e9c657bb77");
public static final TransportVersion V_8_500_004 = registerTransportVersion(8_500_004, "6a00db6a-fd66-42a9-97ea-f6cc53169110");
public static final TransportVersion V_8_500_005 = registerTransportVersion(8_500_005, "65370d2a-d936-4383-a2e0-8403f708129b");
public static final TransportVersion V_8_500_006 = registerTransportVersion(8_500_006, "7BB5621A-80AC-425F-BA88-75543C442F23");

/**
* Reference to the most recent transport version.
* This should be the transport version with the highest id.
*/
public static final TransportVersion CURRENT = V_8_500_005;
public static final TransportVersion CURRENT = V_8_500_006;

/**
* Reference to the earliest compatible transport version to this version of the codebase.
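Registering V_8_500_006 and bumping CURRENT gives the new usage payload a transport version to gate on, so it is only serialized to nodes that understand it (presumably as the minimal supported version of the new DataLifecycleFeatureSetUsage writeable). The snippet below is a self-contained illustration of that version-gating idea, not Elasticsearch code; the ids merely mirror the constants registered above.

// Illustration only (not Elasticsearch code): a monotonically increasing transport version id
// lets a node decide whether a peer understands a newly added wire field.
final class TransportVersionGateSketch {

    static final int V_8_500_005 = 8_500_005;
    static final int V_8_500_006 = 8_500_006; // registered in this PR alongside the data_lifecycle usage payload

    static boolean supportsDataLifecycleUsage(int peerTransportVersionId) {
        return peerTransportVersionId >= V_8_500_006;
    }

    public static void main(String[] args) {
        System.out.println(supportsDataLifecycleUsage(V_8_500_005)); // false -> skip the new fields
        System.out.println(supportsDataLifecycleUsage(V_8_500_006)); // true  -> safe to serialize them
    }
}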
@@ -0,0 +1,206 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/

package org.elasticsearch.xpack.core.action;

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.metadata.DataLifecycle;
import org.elasticsearch.cluster.metadata.DataStream;
import org.elasticsearch.cluster.metadata.DataStreamAlias;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.core.Tuple;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexMode;
import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.protocol.xpack.XPackUsageRequest;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.xcontent.ToXContent;
import org.elasticsearch.xcontent.XContentBuilder;
import org.elasticsearch.xcontent.XContentFactory;
import org.elasticsearch.xcontent.XContentType;
import org.elasticsearch.xpack.core.XPackClientPlugin;
import org.junit.After;

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Function;

import static org.elasticsearch.xpack.core.action.XPackUsageFeatureAction.DATA_LIFECYCLE;
import static org.hamcrest.Matchers.equalTo;

public class DataLifecycleUsageTransportActionIT extends ESIntegTestCase {
/*
* The DataLifecycleUsageTransportAction is not exposed in the xpack core plugin, so we have a special test plugin to do this
*/
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return List.of(TestDateLifecycleUsagePlugin.class);
}

@After
private void cleanup() throws Exception {
updateClusterState(clusterState -> {
ClusterState.Builder clusterStateBuilder = new ClusterState.Builder(clusterState);
Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata());
metadataBuilder.dataStreams(Map.of(), Map.of());
clusterStateBuilder.metadata(metadataBuilder);
return clusterStateBuilder.build();
});
updateClusterSettings(Settings.builder().put(DataLifecycle.CLUSTER_DLM_DEFAULT_ROLLOVER_SETTING.getKey(), (String) null));
}

@SuppressWarnings("unchecked")
public void testAction() throws Exception {
assertUsageResults(0, 0, 0, 0.0, true);
AtomicLong count = new AtomicLong(0);
AtomicLong totalRetentionTimes = new AtomicLong(0);
AtomicLong minRetention = new AtomicLong(Long.MAX_VALUE);
AtomicLong maxRetention = new AtomicLong(Long.MIN_VALUE);
boolean useDefaultRolloverConfig = randomBoolean();
if (useDefaultRolloverConfig == false) {
updateClusterSettings(Settings.builder().put(DataLifecycle.CLUSTER_DLM_DEFAULT_ROLLOVER_SETTING.getKey(), "min_docs=33"));
}
/*
* We now add a number of simulated data streams to the cluster state. Some have lifecycles, some don't. The ones with lifecycles
* have varying retention periods. After adding them, we make sure the numbers add up.
*/
updateClusterState(clusterState -> {
Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata());
Map<String, DataStream> dataStreamMap = new HashMap<>();
for (int dataStreamCount = 0; dataStreamCount < randomInt(200); dataStreamCount++) {
boolean hasLifecycle = randomBoolean();
long retentionMillis;
if (hasLifecycle) {
retentionMillis = randomLongBetween(1000, 100000);
count.incrementAndGet();
totalRetentionTimes.addAndGet(retentionMillis);
if (retentionMillis < minRetention.get()) {
minRetention.set(retentionMillis);
}
if (retentionMillis > maxRetention.get()) {
maxRetention.set(retentionMillis);
}
} else {
retentionMillis = 0;
}
List<Index> indices = new ArrayList<>();
for (int indicesCount = 0; indicesCount < randomIntBetween(1, 10); indicesCount++) {
Index index = new Index(randomAlphaOfLength(60), randomAlphaOfLength(60));
indices.add(index);
}
boolean systemDataStream = randomBoolean();
DataStream dataStream = new DataStream(
randomAlphaOfLength(50),
indices,
randomLongBetween(0, 1000),
Map.of(),
systemDataStream || randomBoolean(),
randomBoolean(),
systemDataStream,
randomBoolean(),
IndexMode.STANDARD,
hasLifecycle ? new DataLifecycle(retentionMillis) : null
);
dataStreamMap.put(dataStream.getName(), dataStream);
}
Map<String, DataStreamAlias> dataStreamAliasesMap = Map.of();
metadataBuilder.dataStreams(dataStreamMap, dataStreamAliasesMap);
ClusterState.Builder clusterStateBuilder = new ClusterState.Builder(clusterState);
clusterStateBuilder.metadata(metadataBuilder);
return clusterStateBuilder.build();
});
int expectedMinimumRetention = minRetention.get() == Long.MAX_VALUE ? 0 : minRetention.intValue();
int expectedMaximumRetention = maxRetention.get() == Long.MIN_VALUE ? 0 : maxRetention.intValue();
double expectedAverageRetention = count.get() == 0 ? 0.0 : totalRetentionTimes.doubleValue() / count.get();
assertUsageResults(
count.intValue(),
expectedMinimumRetention,
expectedMaximumRetention,
expectedAverageRetention,
useDefaultRolloverConfig
);
}

@SuppressWarnings("unchecked")
private void assertUsageResults(
int count,
int minimumRetention,
int maximumRetention,
double averageRetention,
boolean defaultRolloverUsed
) throws Exception {
XPackUsageFeatureResponse response = client().execute(DATA_LIFECYCLE, new XPackUsageRequest()).get();
XContentBuilder builder = XContentFactory.jsonBuilder();
builder = response.getUsage().toXContent(builder, ToXContent.EMPTY_PARAMS);
Tuple<XContentType, Map<String, Object>> tuple = XContentHelper.convertToMap(
BytesReference.bytes(builder),
true,
XContentType.JSON
);

Map<String, Object> map = tuple.v2();
assertThat(map.get("available"), equalTo(true));
assertThat(map.get("enabled"), equalTo(true));
assertThat(map.get("count"), equalTo(count));
assertThat(map.get("default_rollover_used"), equalTo(defaultRolloverUsed));
Map<String, Object> retentionMap = (Map<String, Object>) map.get("retention");
assertThat(retentionMap.size(), equalTo(3));
assertThat(retentionMap.get("minimum_millis"), equalTo(minimumRetention));
assertThat(retentionMap.get("maximum_millis"), equalTo(maximumRetention));
assertThat(retentionMap.get("average_millis"), equalTo(averageRetention));
}

/*
* Updates the cluster state in the internal cluster using the provided function
*/
protected static void updateClusterState(final Function<ClusterState, ClusterState> updater) throws Exception {
final PlainActionFuture<Void> future = PlainActionFuture.newFuture();
final ClusterService clusterService = internalCluster().getCurrentMasterNodeInstance(ClusterService.class);
clusterService.submitUnbatchedStateUpdateTask("test", new ClusterStateUpdateTask() {
@Override
public ClusterState execute(ClusterState currentState) {
return updater.apply(currentState);
}

@Override
public void onFailure(Exception e) {
future.onFailure(e);
}

@Override
public void clusterStateProcessed(ClusterState oldState, ClusterState newState) {
future.onResponse(null);
}
});
future.get();
}

/*
* This plugin exposes the DataLifecycleUsageTransportAction.
*/
public static final class TestDateLifecycleUsagePlugin extends XPackClientPlugin {
@Override
public List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> getActions() {
List<ActionHandler<? extends ActionRequest, ? extends ActionResponse>> actions = new ArrayList<>();
actions.add(new ActionPlugin.ActionHandler<>(DATA_LIFECYCLE, DataLifecycleUsageTransportAction.class));
return actions;
}
}
}
@@ -10,6 +10,7 @@
import org.elasticsearch.action.ActionType;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.NamedDiff;
import org.elasticsearch.cluster.metadata.DataLifecycle;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Setting;
@@ -38,6 +39,7 @@
import org.elasticsearch.xpack.core.archive.ArchiveFeatureSetUsage;
import org.elasticsearch.xpack.core.async.DeleteAsyncResultAction;
import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata;
import org.elasticsearch.xpack.core.datastreams.DataLifecycleFeatureSetUsage;
import org.elasticsearch.xpack.core.datastreams.DataStreamFeatureSetUsage;
import org.elasticsearch.xpack.core.downsample.DownsampleIndexerAction;
import org.elasticsearch.xpack.core.enrich.EnrichFeatureSetUsage;
@@ -234,6 +236,8 @@
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;
import java.util.stream.Stream;

// TODO: merge this into XPackPlugin
public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPlugin {
@@ -414,7 +418,7 @@ public List<ActionType<? extends ActionResponse>> getClientActions() {

@Override
public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
return Arrays.asList(
return Stream.of(
// graph
new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.GRAPH, GraphFeatureSetUsage::new),
// logstash
@@ -545,6 +549,13 @@ public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
),
// Data Streams
new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.DATA_STREAMS, DataStreamFeatureSetUsage::new),
DataLifecycle.isEnabled()
? new NamedWriteableRegistry.Entry(
XPackFeatureSet.Usage.class,
XPackField.DATA_LIFECYCLE,
DataLifecycleFeatureSetUsage::new
)
: null,
// Data Tiers
new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.DATA_TIERS, DataTiersFeatureSetUsage::new),
// Archive
@@ -561,7 +572,7 @@ public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
XPackField.ENTERPRISE_SEARCH,
EnterpriseSearchFeatureSetUsage::new
)
);
).filter(Objects::nonNull).toList();
}

@Override
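The getNamedWriteables() change switches from Arrays.asList(...) to a Stream so that the data_lifecycle usage entry can be added conditionally (null when DataLifecycle.isEnabled() is false) and dropped by filter(Objects::nonNull). A minimal, runnable illustration of that pattern, using placeholder strings instead of NamedWriteableRegistry.Entry:

import java.util.List;
import java.util.Objects;
import java.util.stream.Stream;

// Minimal illustration of the conditional-registration pattern above:
// optional entries are represented as null and filtered out of the final list.
final class ConditionalRegistrationSketch {

    static List<String> entries(boolean dataLifecycleEnabled) {
        return Stream.of(
            "data_streams",
            dataLifecycleEnabled ? "data_lifecycle" : null, // only present when the DLM feature flag is on
            "data_tiers"
        ).filter(Objects::nonNull).toList();
    }

    public static void main(String[] args) {
        System.out.println(entries(false)); // [data_streams, data_tiers]
        System.out.println(entries(true));  // [data_streams, data_lifecycle, data_tiers]
    }
}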
@@ -65,6 +65,8 @@ public final class XPackField {
public static final String SEARCHABLE_SNAPSHOTS = "searchable_snapshots";
/** Name constant for the data streams feature. */
public static final String DATA_STREAMS = "data_streams";
/** Name constant for the data lifecycle feature. */
public static final String DATA_LIFECYCLE = "data_lifecycle";
/** Name constant for the data tiers feature. */
public static final String DATA_TIERS = "data_tiers";
/** Name constant for the aggregate_metric plugin. */
@@ -17,6 +17,7 @@
import org.elasticsearch.action.support.TransportAction;
import org.elasticsearch.client.internal.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.DataLifecycle;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -74,6 +75,7 @@
import org.elasticsearch.xcontent.NamedXContentRegistry;
import org.elasticsearch.xpack.cluster.routing.allocation.DataTierAllocationDecider;
import org.elasticsearch.xpack.cluster.routing.allocation.mapper.DataTierFieldMapper;
import org.elasticsearch.xpack.core.action.DataLifecycleUsageTransportAction;
import org.elasticsearch.xpack.core.action.DataStreamInfoTransportAction;
import org.elasticsearch.xpack.core.action.DataStreamUsageTransportAction;
import org.elasticsearch.xpack.core.action.ReloadAnalyzerAction;
@@ -355,6 +357,9 @@ public Collection<Object> createComponents(
actions.add(new ActionHandler<>(XPackUsageFeatureAction.DATA_TIERS, DataTiersUsageTransportAction.class));
actions.add(new ActionHandler<>(XPackUsageFeatureAction.DATA_STREAMS, DataStreamUsageTransportAction.class));
actions.add(new ActionHandler<>(XPackInfoFeatureAction.DATA_STREAMS, DataStreamInfoTransportAction.class));
if (DataLifecycle.isEnabled()) {
actions.add(new ActionHandler<>(XPackUsageFeatureAction.DATA_LIFECYCLE, DataLifecycleUsageTransportAction.class));
}
actions.add(new ActionHandler<>(XPackUsageFeatureAction.HEALTH, HealthApiUsageTransportAction.class));
actions.add(new ActionHandler<>(XPackUsageFeatureAction.REMOTE_CLUSTERS, RemoteClusterUsageTransportAction.class));
return actions;