diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java
index 85ab7a1dbaace..c79aac2afaf1a 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthRequestBuilder.java
@@ -26,7 +26,8 @@
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.unit.TimeValue;
-public class ClusterHealthRequestBuilder extends MasterNodeReadOperationRequestBuilder {
+public class ClusterHealthRequestBuilder
+    extends MasterNodeReadOperationRequestBuilder {
     public ClusterHealthRequestBuilder(ElasticsearchClient client, ClusterHealthAction action) {
         super(client, action, new ClusterHealthRequest());
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java
index 255f70c56fe6b..7b2cd8d9ebb3b 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java
@@ -47,7 +47,8 @@
 import java.util.function.Predicate;
-public class TransportClusterHealthAction extends TransportMasterNodeReadAction {
+public class TransportClusterHealthAction
+    extends TransportMasterNodeReadAction {
     private final GatewayAllocator gatewayAllocator;
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java
index 1709151e824d9..02a7cf3ebdde4 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequestBuilder.java
@@ -23,7 +23,8 @@
 import org.elasticsearch.client.ElasticsearchClient;
 import org.elasticsearch.common.unit.TimeValue;
-public class NodesHotThreadsRequestBuilder extends NodesOperationRequestBuilder {
+public class NodesHotThreadsRequestBuilder
+    extends NodesOperationRequestBuilder {
     public NodesHotThreadsRequestBuilder(ElasticsearchClient client, NodesHotThreadsAction action) {
         super(client, action, new NodesHotThreadsRequest());
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java
index 113766f3e9f21..c5dfb77e6c4b9 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestBuilder.java
@@ -23,7 +23,8 @@
 import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder;
 import org.elasticsearch.client.ElasticsearchClient;
-public class NodesStatsRequestBuilder extends NodesOperationRequestBuilder {
+public class NodesStatsRequestBuilder
+    extends NodesOperationRequestBuilder {
     public NodesStatsRequestBuilder(ElasticsearchClient client, NodesStatsAction action) {
         super(client, action, new NodesStatsRequest());
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java
index b6e8de24c5d7b..6b1fc0a8ed13e 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/DeleteRepositoryRequestBuilder.java
@@ -26,7 +26,8 @@
 /**
  * Builder for unregister repository request
  */
-public class DeleteRepositoryRequestBuilder extends AcknowledgedRequestBuilder {
+public class DeleteRepositoryRequestBuilder
+    extends AcknowledgedRequestBuilder {
     /**
      * Constructs unregister repository request builder
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java
index a39bb12e31e24..58ac157feb9ba 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/delete/TransportDeleteRepositoryAction.java
@@ -46,7 +46,8 @@ public class TransportDeleteRepositoryAction extends TransportMasterNodeAction listener) {
+    protected void masterOperation(final DeleteRepositoryRequest request, ClusterState state,
+                                   final ActionListener listener) {
         repositoriesService.unregisterRepository(
             new RepositoriesService.UnregisterRepositoryRequest("delete_repository [" + request.name() + "]", request.name())
                 .masterNodeTimeout(request.masterNodeTimeout()).ackTimeout(request.timeout()),
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java
index e21aa19f7f849..d20915e617b3a 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/GetRepositoriesRequestBuilder.java
@@ -26,7 +26,8 @@
 /**
  * Get repository request builder
  */
-public class GetRepositoriesRequestBuilder extends MasterNodeReadOperationRequestBuilder {
+public class GetRepositoriesRequestBuilder
+    extends MasterNodeReadOperationRequestBuilder {
     /**
      * Creates new get repository request builder
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java
index c7474fc28cc05..62c7c1429202f 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java
@@ -50,8 +50,10 @@ public class TransportGetRepositoriesAction extends TransportMasterNodeReadActio
     @Inject
     public TransportGetRepositoriesAction(Settings settings, TransportService transportService, ClusterService clusterService,
-                                          ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
-        super(settings, GetRepositoriesAction.NAME, transportService, clusterService, threadPool, actionFilters, GetRepositoriesRequest::new, indexNameExpressionResolver);
+                                          ThreadPool threadPool, ActionFilters actionFilters,
+                                          IndexNameExpressionResolver indexNameExpressionResolver) {
+        super(settings, GetRepositoriesAction.NAME, transportService, clusterService, threadPool, actionFilters,
+            GetRepositoriesRequest::new, indexNameExpressionResolver);
     }
     @Override
@@ -70,7 +72,8 @@ protected ClusterBlockException checkBlock(GetRepositoriesRequest request, Clust
     }
     @Override
-    protected void masterOperation(final GetRepositoriesRequest request, ClusterState state, final ActionListener listener) {
+    protected void masterOperation(final GetRepositoriesRequest request, ClusterState state,
+                                   final ActionListener listener) {
         MetaData metaData = state.metaData();
         RepositoriesMetaData repositories = metaData.custom(RepositoriesMetaData.TYPE);
         if (request.repositories().length == 0 || (request.repositories().length == 1 && "_all".equals(request.repositories()[0]))) {
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java
index ea62bb4eee60a..9f17a6ac43a3c 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestBuilder.java
@@ -30,7 +30,8 @@
 /**
  * Register repository request builder
  */
-public class PutRepositoryRequestBuilder extends AcknowledgedRequestBuilder {
+public class PutRepositoryRequestBuilder
+    extends AcknowledgedRequestBuilder {
     /**
      * Constructs register repository request
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java
index 6f0339f46eecf..cae9c4e117f79 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/TransportPutRepositoryAction.java
@@ -46,7 +46,8 @@ public class TransportPutRepositoryAction extends TransportMasterNodeAction listener) {
+    protected void masterOperation(final PutRepositoryRequest request, ClusterState state,
+                                   final ActionListener listener) {
         repositoriesService.registerRepository(
             new RepositoriesService.RegisterRepositoryRequest("put_repository [" + request.name() + "]",
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java
index 4614085f26e2b..17f9efd07c812 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java
@@ -46,7 +46,8 @@ public class TransportVerifyRepositoryAction extends TransportMasterNodeAction listener) {
+    protected void masterOperation(final VerifyRepositoryRequest request, ClusterState state,
+                                   final ActionListener listener) {
         repositoriesService.verifyRepository(request.name(), new ActionListener() {
             @Override
             public void
onResponse(RepositoriesService.VerifyResponse verifyResponse) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java index 752ceff357a7d..c83443eab9a0c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryRequestBuilder.java @@ -25,7 +25,8 @@ /** * Builder for unregister repository request */ -public class VerifyRepositoryRequestBuilder extends MasterNodeOperationRequestBuilder { +public class VerifyRepositoryRequestBuilder + extends MasterNodeOperationRequestBuilder { /** * Constructs unregister repository request builder diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java index 108ce586573d7..5cd1818c7b6c0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java @@ -43,9 +43,11 @@ public class TransportClusterRerouteAction extends TransportMasterNodeAction listener) { + protected void masterOperation(final ClusterRerouteRequest request, final ClusterState state, + final ActionListener listener) { ActionListener logWrapper = ActionListener.wrap( response -> { if (request.dryRun() == false) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java index 46ee53aaf97ab..b7aa57cd6e87a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsRequestBuilder.java @@ -29,7 +29,8 @@ /** * Builder for a cluster update settings request */ -public class ClusterUpdateSettingsRequestBuilder extends AcknowledgedRequestBuilder { +public class ClusterUpdateSettingsRequestBuilder extends AcknowledgedRequestBuilder { public ClusterUpdateSettingsRequestBuilder(ElasticsearchClient client, ClusterUpdateSettingsAction action) { super(client, action, new ClusterUpdateSettingsRequest()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java index 92edcc5649631..df1028a32b977 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestBuilder.java @@ -23,7 +23,8 @@ import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; -public class ClusterSearchShardsRequestBuilder extends MasterNodeReadOperationRequestBuilder { +public class ClusterSearchShardsRequestBuilder extends MasterNodeReadOperationRequestBuilder { public ClusterSearchShardsRequestBuilder(ElasticsearchClient client, 
ClusterSearchShardsAction action) { super(client, action, new ClusterSearchShardsRequest()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java index f4f36ca4d65e9..69dea68862eb0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/TransportClusterSearchShardsAction.java @@ -97,8 +97,8 @@ protected void masterOperation(final ClusterSearchShardsRequest request, final C } Set nodeIds = new HashSet<>(); - GroupShardsIterator groupShardsIterator = clusterService.operationRouting().searchShards(clusterState, concreteIndices, - routingMap, request.preference()); + GroupShardsIterator groupShardsIterator = clusterService.operationRouting() + .searchShards(clusterState, concreteIndices, routingMap, request.preference()); ShardRouting shard; ClusterSearchShardsGroup[] groupResponses = new ClusterSearchShardsGroup[groupShardsIterator.size()]; int currentGroup = 0; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java index 4022d0497c018..909b0a6360bc3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequestBuilder.java @@ -30,7 +30,8 @@ /** * Create snapshot request builder */ -public class CreateSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder { +public class CreateSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder { /** * Constructs a new create snapshot request builder diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java index a7a5548552be2..18e985884c635 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java @@ -45,7 +45,8 @@ public class TransportCreateSnapshotAction extends TransportMasterNodeAction listener) { + protected void masterOperation(final CreateSnapshotRequest request, ClusterState state, + final ActionListener listener) { final String snapshotName = indexNameExpressionResolver.resolveDateMathExpression(request.snapshot()); SnapshotsService.SnapshotRequest snapshotRequest = new SnapshotsService.SnapshotRequest(request.repository(), snapshotName, "create_snapshot [" + snapshotName + "]") diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java index 9b723de0e6caa..1e47160903c85 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/DeleteSnapshotRequestBuilder.java @@ -26,7 +26,8 @@ /** * Delete 
snapshot request builder */ -public class DeleteSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder { +public class DeleteSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder { /** * Constructs delete snapshot request builder diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java index b3c9f089e6301..c3ebb9129b7c8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java @@ -44,7 +44,8 @@ public class TransportDeleteSnapshotAction extends TransportMasterNodeAction listener) { + protected void masterOperation(final DeleteSnapshotRequest request, ClusterState state, + final ActionListener listener) { snapshotsService.deleteSnapshot(request.repository(), request.snapshot(), new SnapshotsService.DeleteSnapshotListener() { @Override public void onResponse() { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java index 2115bd0bc3b81..052f8da0c7508 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequestBuilder.java @@ -26,7 +26,8 @@ /** * Get snapshots request builder */ -public class GetSnapshotsRequestBuilder extends MasterNodeOperationRequestBuilder { +public class GetSnapshotsRequestBuilder extends MasterNodeOperationRequestBuilder { /** * Constructs the new get snapshot request diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java index 8e42ef4dbee29..f530261644776 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java @@ -31,7 +31,8 @@ /** * Restore snapshot request builder */ -public class RestoreSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder { +public class RestoreSnapshotRequestBuilder extends MasterNodeOperationRequestBuilder { /** * Constructs new restore snapshot request builder diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java index 32d4800676295..3b3a79aadc201 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java @@ -53,7 +53,8 @@ public class TransportRestoreSnapshotAction extends TransportMasterNodeAction listener) { + protected void masterOperation(final RestoreSnapshotRequest request, final ClusterState state, + final ActionListener listener) { RestoreService.RestoreRequest restoreRequest = new 
RestoreService.RestoreRequest(request.repository(), request.snapshot(), request.indices(), request.indicesOptions(), request.renamePattern(), request.renameReplacement(), request.settings(), request.masterNodeTimeout(), request.includeGlobalState(), request.partial(), request.includeAliases(), diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java index 37d8ad04d0e7e..0424f858d3388 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusRequestBuilder.java @@ -26,7 +26,8 @@ /** * Snapshots status request builder */ -public class SnapshotsStatusRequestBuilder extends MasterNodeOperationRequestBuilder { +public class SnapshotsStatusRequestBuilder extends MasterNodeOperationRequestBuilder { /** * Constructs the new snapshot status request diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index 949918f88a10a..206170b583f5f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -69,7 +69,8 @@ public TransportSnapshotsStatusAction(Settings settings, TransportService transp ThreadPool threadPool, SnapshotsService snapshotsService, TransportNodesSnapshotsStatus transportNodesSnapshotsStatus, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, SnapshotsStatusAction.NAME, transportService, clusterService, threadPool, actionFilters, SnapshotsStatusRequest::new, indexNameExpressionResolver); + super(settings, SnapshotsStatusAction.NAME, transportService, clusterService, threadPool, actionFilters, + SnapshotsStatusRequest::new, indexNameExpressionResolver); this.snapshotsService = snapshotsService; this.transportNodesSnapshotsStatus = transportNodesSnapshotsStatus; } @@ -116,7 +117,8 @@ protected void masterOperation(final SnapshotsStatusRequest request, snapshots[i] = currentSnapshots.get(i).snapshot(); } - TransportNodesSnapshotsStatus.Request nodesRequest = new TransportNodesSnapshotsStatus.Request(nodesIds.toArray(new String[nodesIds.size()])) + TransportNodesSnapshotsStatus.Request nodesRequest = + new TransportNodesSnapshotsStatus.Request(nodesIds.toArray(new String[nodesIds.size()])) .snapshots(snapshots).timeout(request.masterNodeTimeout()); transportNodesSnapshotsStatus.execute(nodesRequest, new ActionListener() { @Override @@ -143,7 +145,8 @@ public void onFailure(Exception e) { } private SnapshotsStatusResponse buildResponse(SnapshotsStatusRequest request, List currentSnapshotEntries, - TransportNodesSnapshotsStatus.NodesSnapshotStatus nodeSnapshotStatuses) throws IOException { + TransportNodesSnapshotsStatus.NodesSnapshotStatus nodeSnapshotStatuses) + throws IOException { // First process snapshot that are currently processed List builder = new ArrayList<>(); Set currentSnapshotNames = new HashSet<>(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestBuilder.java 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestBuilder.java index 524e167e3a265..35020556b1ed3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestBuilder.java @@ -23,7 +23,8 @@ import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; -public class ClusterStateRequestBuilder extends MasterNodeReadOperationRequestBuilder { +public class ClusterStateRequestBuilder extends MasterNodeReadOperationRequestBuilder { public ClusterStateRequestBuilder(ElasticsearchClient client, ClusterStateAction action) { super(client, action, new ClusterStateRequest()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java index b7ef075a59afa..cfc716f4b8d98 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -45,9 +45,11 @@ public class TransportClusterStateAction extends TransportMasterNodeReadAction { +public class ClusterStatsRequestBuilder extends NodesOperationRequestBuilder { public ClusterStatsRequestBuilder(ElasticsearchClient client, ClusterStatsAction action) { super(client, action, new ClusterStatsRequest()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java index 66b258670c128..bd6038f3f9ad6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -53,7 +53,8 @@ public class TransportClusterStatsAction extends TransportNodesAction { private static final CommonStatsFlags SHARD_STATS_FLAGS = new CommonStatsFlags(CommonStatsFlags.Flag.Docs, CommonStatsFlags.Flag.Store, - CommonStatsFlags.Flag.FieldData, CommonStatsFlags.Flag.QueryCache, CommonStatsFlags.Flag.Completion, CommonStatsFlags.Flag.Segments); + CommonStatsFlags.Flag.FieldData, CommonStatsFlags.Flag.QueryCache, + CommonStatsFlags.Flag.Completion, CommonStatsFlags.Flag.Segments); private final NodeService nodeService; private final IndicesService indicesService; @@ -126,7 +127,8 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq clusterStatus = new ClusterStateHealth(clusterService.state()).getStatus(); } - return new ClusterStatsNodeResponse(nodeInfo.getNode(), clusterStatus, nodeInfo, nodeStats, shardsStats.toArray(new ShardStats[shardsStats.size()])); + return new ClusterStatsNodeResponse(nodeInfo.getNode(), clusterStatus, nodeInfo, nodeStats, + shardsStats.toArray(new ShardStats[shardsStats.size()])); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java index 029ba7414abb5..062b21cd4cd85 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksRequestBuilder.java @@ -22,7 +22,8 @@ import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; -public class PendingClusterTasksRequestBuilder extends MasterNodeReadOperationRequestBuilder { +public class PendingClusterTasksRequestBuilder extends MasterNodeReadOperationRequestBuilder { public PendingClusterTasksRequestBuilder(ElasticsearchClient client, PendingClusterTasksAction action) { super(client, action, new PendingClusterTasksRequest()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java index 542b2dd8badc4..baef90a7ce0a6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java @@ -35,14 +35,17 @@ import java.util.List; -public class TransportPendingClusterTasksAction extends TransportMasterNodeReadAction { +public class TransportPendingClusterTasksAction + extends TransportMasterNodeReadAction { private final ClusterService clusterService; @Inject public TransportPendingClusterTasksAction(Settings settings, TransportService transportService, ClusterService clusterService, - ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) { - super(settings, PendingClusterTasksAction.NAME, transportService, clusterService, threadPool, actionFilters, PendingClusterTasksRequest::new, indexNameExpressionResolver); + ThreadPool threadPool, ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver) { + super(settings, PendingClusterTasksAction.NAME, transportService, clusterService, threadPool, actionFilters, + PendingClusterTasksRequest::new, indexNameExpressionResolver); this.clusterService = clusterService; } @@ -63,7 +66,8 @@ protected PendingClusterTasksResponse newResponse() { } @Override - protected void masterOperation(PendingClusterTasksRequest request, ClusterState state, ActionListener listener) { + protected void masterOperation(PendingClusterTasksRequest request, ClusterState state, + ActionListener listener) { logger.trace("fetching pending tasks from cluster service"); final List pendingTasks = clusterService.getMasterService().pendingTasks(); logger.trace("done fetching pending tasks from cluster service"); diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java b/server/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java index d191bc0175606..d97f490cdfb44 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java @@ -138,7 +138,8 @@ public void waitForNextChange(Listener listener, Predicate statePr timeoutTimeLeftMS = timeOutValue.millis() - timeSinceStartMS; if (timeoutTimeLeftMS <= 0L) { // things have timeout while we were busy -> notify - logger.trace("observer timed out. notifying listener. timeout setting [{}], time since start [{}]", timeOutValue, new TimeValue(timeSinceStartMS)); + logger.trace("observer timed out. notifying listener. 
timeout setting [{}], time since start [{}]", + timeOutValue, new TimeValue(timeSinceStartMS)); // update to latest, in case people want to retry timedOut = true; lastObservedState.set(new StoredState(clusterApplierService.state())); @@ -169,7 +170,8 @@ public void waitForNextChange(Listener listener, Predicate statePr if (!observingContext.compareAndSet(null, context)) { throw new ElasticsearchException("already waiting for a cluster state change"); } - clusterApplierService.addTimeoutListener(timeoutTimeLeftMS == null ? null : new TimeValue(timeoutTimeLeftMS), clusterStateListener); + clusterApplierService.addTimeoutListener(timeoutTimeLeftMS == null ? + null : new TimeValue(timeoutTimeLeftMS), clusterStateListener); } } @@ -190,7 +192,8 @@ public void clusterChanged(ClusterChangedEvent event) { lastObservedState.set(new StoredState(state)); context.listener.onNewClusterState(state); } else { - logger.trace("observer: predicate approved change but observing context has changed - ignoring (new cluster state version [{}])", state.version()); + logger.trace("observer: predicate approved change but observing context has changed " + + "- ignoring (new cluster state version [{}])", state.version()); } } else { logger.trace("observer: predicate rejected change (new cluster state version [{}])", state.version()); diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java b/server/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java index 9dc9c7f6f52d0..662a6e00954f3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterStateUpdateTask.java @@ -28,7 +28,8 @@ /** * A task that can update the cluster state. */ -public abstract class ClusterStateUpdateTask implements ClusterStateTaskConfig, ClusterStateTaskExecutor, ClusterStateTaskListener { +public abstract class ClusterStateUpdateTask + implements ClusterStateTaskConfig, ClusterStateTaskExecutor, ClusterStateTaskListener { private final Priority priority; @@ -41,7 +42,8 @@ public ClusterStateUpdateTask(Priority priority) { } @Override - public final ClusterTasksResult execute(ClusterState currentState, List tasks) throws Exception { + public final ClusterTasksResult execute(ClusterState currentState, List tasks) + throws Exception { ClusterState result = execute(currentState); return ClusterTasksResult.builder().successes(tasks).build(result); } diff --git a/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java b/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java index f7bb42b8dc368..78eceeb12bcca 100644 --- a/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java +++ b/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java @@ -68,7 +68,8 @@ public static KeySerializer getVIntKeySerializer() { /** * Calculates diff between two ImmutableOpenMaps of Diffable objects */ - public static > MapDiff> diff(ImmutableOpenMap before, ImmutableOpenMap after, KeySerializer keySerializer) { + public static > MapDiff> diff(ImmutableOpenMap before, + ImmutableOpenMap after, KeySerializer keySerializer) { assert after != null && before != null; return new ImmutableOpenMapDiff<>(before, after, keySerializer, DiffableValueSerializer.getWriteOnlyInstance()); } @@ -76,7 +77,8 @@ public static > MapDiff> d /** * Calculates diff between two ImmutableOpenMaps of non-diffable objects */ - public static MapDiff> diff(ImmutableOpenMap before, ImmutableOpenMap after, KeySerializer 
keySerializer, ValueSerializer valueSerializer) { + public static MapDiff> diff(ImmutableOpenMap before, + ImmutableOpenMap after, KeySerializer keySerializer, ValueSerializer valueSerializer) { assert after != null && before != null; return new ImmutableOpenMapDiff<>(before, after, keySerializer, valueSerializer); } @@ -84,7 +86,8 @@ public static MapDiff> diff(ImmutableOpenMap /** * Calculates diff between two ImmutableOpenIntMaps of Diffable objects */ - public static > MapDiff> diff(ImmutableOpenIntMap before, ImmutableOpenIntMap after, KeySerializer keySerializer) { + public static > MapDiff> diff(ImmutableOpenIntMap before, + ImmutableOpenIntMap after, KeySerializer keySerializer) { assert after != null && before != null; return new ImmutableOpenIntMapDiff<>(before, after, keySerializer, DiffableValueSerializer.getWriteOnlyInstance()); } @@ -92,7 +95,8 @@ public static > MapDiff /** * Calculates diff between two ImmutableOpenIntMaps of non-diffable objects */ - public static MapDiff> diff(ImmutableOpenIntMap before, ImmutableOpenIntMap after, KeySerializer keySerializer, ValueSerializer valueSerializer) { + public static MapDiff> diff(ImmutableOpenIntMap before, + ImmutableOpenIntMap after, KeySerializer keySerializer, ValueSerializer valueSerializer) { assert after != null && before != null; return new ImmutableOpenIntMapDiff<>(before, after, keySerializer, valueSerializer); } @@ -100,7 +104,8 @@ public static MapDiff> diff(ImmutableOpen /** * Calculates diff between two Maps of Diffable objects. */ - public static > MapDiff> diff(Map before, Map after, KeySerializer keySerializer) { + public static > MapDiff> diff(Map before, + Map after, KeySerializer keySerializer) { assert after != null && before != null; return new JdkMapDiff<>(before, after, keySerializer, DiffableValueSerializer.getWriteOnlyInstance()); } @@ -108,7 +113,8 @@ public static > MapDiff> diff(Map /** * Calculates diff between two Maps of non-diffable objects */ - public static MapDiff> diff(Map before, Map after, KeySerializer keySerializer, ValueSerializer valueSerializer) { + public static MapDiff> diff(Map before, Map after, KeySerializer keySerializer, + ValueSerializer valueSerializer) { assert after != null && before != null; return new JdkMapDiff<>(before, after, keySerializer, valueSerializer); } @@ -116,42 +122,48 @@ public static MapDiff> diff(Map before, Map a /** * Loads an object that represents difference between two ImmutableOpenMaps */ - public static MapDiff> readImmutableOpenMapDiff(StreamInput in, KeySerializer keySerializer, ValueSerializer valueSerializer) throws IOException { + public static MapDiff> readImmutableOpenMapDiff(StreamInput in, KeySerializer keySerializer, + ValueSerializer valueSerializer) throws IOException { return new ImmutableOpenMapDiff<>(in, keySerializer, valueSerializer); } /** * Loads an object that represents difference between two ImmutableOpenMaps */ - public static MapDiff> readImmutableOpenIntMapDiff(StreamInput in, KeySerializer keySerializer, ValueSerializer valueSerializer) throws IOException { + public static MapDiff> readImmutableOpenIntMapDiff(StreamInput in, + KeySerializer keySerializer, ValueSerializer valueSerializer) throws IOException { return new ImmutableOpenIntMapDiff<>(in, keySerializer, valueSerializer); } /** * Loads an object that represents difference between two Maps of Diffable objects */ - public static MapDiff> readJdkMapDiff(StreamInput in, KeySerializer keySerializer, ValueSerializer valueSerializer) throws IOException { + public static 
MapDiff> readJdkMapDiff(StreamInput in, KeySerializer keySerializer, + ValueSerializer valueSerializer) throws IOException { return new JdkMapDiff<>(in, keySerializer, valueSerializer); } /** * Loads an object that represents difference between two ImmutableOpenMaps of Diffable objects using Diffable proto object */ - public static > MapDiff> readImmutableOpenMapDiff(StreamInput in, KeySerializer keySerializer, Reader reader, Reader> diffReader) throws IOException { + public static > MapDiff> readImmutableOpenMapDiff(StreamInput in, + KeySerializer keySerializer, Reader reader, Reader> diffReader) throws IOException { return new ImmutableOpenMapDiff<>(in, keySerializer, new DiffableValueReader<>(reader, diffReader)); } /** * Loads an object that represents difference between two ImmutableOpenIntMaps of Diffable objects using Diffable proto object */ - public static > MapDiff> readImmutableOpenIntMapDiff(StreamInput in, KeySerializer keySerializer, Reader reader, Reader> diffReader) throws IOException { + public static > MapDiff> readImmutableOpenIntMapDiff(StreamInput in, + KeySerializer keySerializer, Reader reader, Reader> diffReader) throws IOException { return new ImmutableOpenIntMapDiff<>(in, keySerializer, new DiffableValueReader<>(reader, diffReader)); } /** * Loads an object that represents difference between two Maps of Diffable objects using Diffable proto object */ - public static > MapDiff> readJdkMapDiff(StreamInput in, KeySerializer keySerializer, Reader reader, Reader> diffReader) throws IOException { + public static > MapDiff> readJdkMapDiff(StreamInput in, KeySerializer keySerializer, + Reader reader, Reader> diffReader) throws IOException { return new JdkMapDiff<>(in, keySerializer, new DiffableValueReader<>(reader, diffReader)); } @@ -217,7 +229,8 @@ public Map apply(Map map) { */ public static class ImmutableOpenMapDiff extends MapDiff> { - protected ImmutableOpenMapDiff(StreamInput in, KeySerializer keySerializer, ValueSerializer valueSerializer) throws IOException { + protected ImmutableOpenMapDiff(StreamInput in, KeySerializer keySerializer, + ValueSerializer valueSerializer) throws IOException { super(in, keySerializer, valueSerializer); } @@ -293,7 +306,8 @@ public ImmutableOpenMap apply(ImmutableOpenMap map) { */ private static class ImmutableOpenIntMapDiff extends MapDiff> { - protected ImmutableOpenIntMapDiff(StreamInput in, KeySerializer keySerializer, ValueSerializer valueSerializer) throws IOException { + protected ImmutableOpenIntMapDiff(StreamInput in, KeySerializer keySerializer, + ValueSerializer valueSerializer) throws IOException { super(in, keySerializer, valueSerializer); } diff --git a/server/src/main/java/org/elasticsearch/cluster/IncompatibleClusterStateVersionException.java b/server/src/main/java/org/elasticsearch/cluster/IncompatibleClusterStateVersionException.java index fb5f2334969d6..c225cb551306d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/IncompatibleClusterStateVersionException.java +++ b/server/src/main/java/org/elasticsearch/cluster/IncompatibleClusterStateVersionException.java @@ -33,7 +33,8 @@ public IncompatibleClusterStateVersionException(String msg) { } public IncompatibleClusterStateVersionException(long expectedVersion, String expectedUuid, long receivedVersion, String receivedUuid) { - super("Expected diff for version " + expectedVersion + " with uuid " + expectedUuid + " got version " + receivedVersion + " and uuid " + receivedUuid); + super("Expected diff for version " + expectedVersion + " with uuid " + 
expectedUuid + " got version " + + receivedVersion + " and uuid " + receivedUuid); } public IncompatibleClusterStateVersionException(StreamInput in) throws IOException{ diff --git a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java index dbfc4b3445e07..a22c2099106b7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java +++ b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java @@ -102,7 +102,8 @@ public InternalClusterInfoService(Settings settings, ClusterService clusterServi ClusterSettings clusterSettings = clusterService.getClusterSettings(); clusterSettings.addSettingsUpdateConsumer(INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING, this::setFetchTimeout); clusterSettings.addSettingsUpdateConsumer(INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING, this::setUpdateFrequency); - clusterSettings.addSettingsUpdateConsumer(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, this::setEnabled); + clusterSettings.addSettingsUpdateConsumer(DiskThresholdSettings.CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING, + this::setEnabled); // Add InternalClusterInfoService to listen for Master changes this.clusterService.addLocalNodeMasterListener(this); @@ -400,7 +401,8 @@ static void fillDiskUsagePerNode(Logger logger, List nodeStatsArray, String nodeId = nodeStats.getNode().getId(); String nodeName = nodeStats.getNode().getName(); if (logger.isTraceEnabled()) { - logger.trace("node: [{}], most available: total disk: {}, available disk: {} / least available: total disk: {}, available disk: {}", + logger.trace("node: [{}], most available: total disk: {}," + + " available disk: {} / least available: total disk: {}, available disk: {}", nodeId, mostAvailablePath.getTotal(), leastAvailablePath.getAvailable(), leastAvailablePath.getTotal(), leastAvailablePath.getAvailable()); } @@ -410,7 +412,8 @@ static void fillDiskUsagePerNode(Logger logger, List nodeStatsArray, nodeId, leastAvailablePath.getTotal().getBytes()); } } else { - newLeastAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, leastAvailablePath.getPath(), leastAvailablePath.getTotal().getBytes(), leastAvailablePath.getAvailable().getBytes())); + newLeastAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, leastAvailablePath.getPath(), + leastAvailablePath.getTotal().getBytes(), leastAvailablePath.getAvailable().getBytes())); } if (mostAvailablePath.getTotal().getBytes() < 0) { if (logger.isTraceEnabled()) { @@ -418,7 +421,8 @@ static void fillDiskUsagePerNode(Logger logger, List nodeStatsArray, nodeId, mostAvailablePath.getTotal().getBytes()); } } else { - newMostAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, mostAvailablePath.getPath(), mostAvailablePath.getTotal().getBytes(), mostAvailablePath.getAvailable().getBytes())); + newMostAvaiableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, mostAvailablePath.getPath(), + mostAvailablePath.getTotal().getBytes(), mostAvailablePath.getAvailable().getBytes())); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/LocalNodeMasterListener.java b/server/src/main/java/org/elasticsearch/cluster/LocalNodeMasterListener.java index 9f7fb00e19523..0b17cfecf6c34 100644 --- a/server/src/main/java/org/elasticsearch/cluster/LocalNodeMasterListener.java +++ b/server/src/main/java/org/elasticsearch/cluster/LocalNodeMasterListener.java @@ -37,10 +37,11 @@ public interface 
LocalNodeMasterListener { /** * The name of the executor that the implementation of the callbacks of this lister should be executed on. The thread * that is responsible for managing instances of this lister is the same thread handling the cluster state events. If - * the work done is the callbacks above is inexpensive, this value may be {@link org.elasticsearch.threadpool.ThreadPool.Names#SAME SAME} - * (indicating that the callbacks will run on the same thread as the cluster state events are fired with). On the other hand, - * if the logic in the callbacks are heavier and take longer to process (or perhaps involve blocking due to IO operations), - * prefer to execute them on a separate more appropriate executor (eg. {@link org.elasticsearch.threadpool.ThreadPool.Names#GENERIC GENERIC} + * the work done is the callbacks above is inexpensive, this value may be + * {@link org.elasticsearch.threadpool.ThreadPool.Names#SAME SAME} (indicating that the callbacks will run on the same thread + * as the cluster state events are fired with). On the other hand, if the logic in the callbacks are heavier and take + * longer to process (or perhaps involve blocking due to IO operations), prefer to execute them on a separate more appropriate + * executor (eg. {@link org.elasticsearch.threadpool.ThreadPool.Names#GENERIC GENERIC} * or {@link org.elasticsearch.threadpool.ThreadPool.Names#MANAGEMENT MANAGEMENT}). * * @return The name of the executor that will run the callbacks of this listener. diff --git a/server/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java b/server/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java index 2559c14848d76..5a3e6e326bd22 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java @@ -52,7 +52,8 @@ public NodeMappingRefreshAction(Settings settings, TransportService transportSer super(settings); this.transportService = transportService; this.metaDataMappingService = metaDataMappingService; - transportService.registerRequestHandler(ACTION_NAME, NodeMappingRefreshRequest::new, ThreadPool.Names.SAME, new NodeMappingRefreshTransportHandler()); + transportService.registerRequestHandler(ACTION_NAME, + NodeMappingRefreshRequest::new, ThreadPool.Names.SAME, new NodeMappingRefreshTransportHandler()); } public void nodeMappingRefresh(final DiscoveryNode masterNode, final NodeMappingRefreshRequest request) { diff --git a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index 0949e47cd0527..81d6a806143b4 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -96,12 +96,17 @@ public ShardStateAction(Settings settings, ClusterService clusterService, Transp this.clusterService = clusterService; this.threadPool = threadPool; - transportService.registerRequestHandler(SHARD_STARTED_ACTION_NAME, ThreadPool.Names.SAME, StartedShardEntry::new, new ShardStartedTransportHandler(clusterService, new ShardStartedClusterStateTaskExecutor(allocationService, logger), logger)); - transportService.registerRequestHandler(SHARD_FAILED_ACTION_NAME, ThreadPool.Names.SAME, FailedShardEntry::new, new ShardFailedTransportHandler(clusterService, new 
ShardFailedClusterStateTaskExecutor(allocationService, routingService, logger), logger)); + transportService.registerRequestHandler(SHARD_STARTED_ACTION_NAME, ThreadPool.Names.SAME, StartedShardEntry::new, + new ShardStartedTransportHandler(clusterService, new ShardStartedClusterStateTaskExecutor(allocationService, logger), logger)); + transportService.registerRequestHandler(SHARD_FAILED_ACTION_NAME, ThreadPool.Names.SAME, FailedShardEntry::new, + new ShardFailedTransportHandler(clusterService, + new ShardFailedClusterStateTaskExecutor(allocationService, routingService, logger), logger)); } - private void sendShardAction(final String actionName, final ClusterState currentState, final TransportRequest request, final Listener listener) { - ClusterStateObserver observer = new ClusterStateObserver(currentState, clusterService, null, logger, threadPool.getThreadContext()); + private void sendShardAction(final String actionName, final ClusterState currentState, + final TransportRequest request, final Listener listener) { + ClusterStateObserver observer = + new ClusterStateObserver(currentState, clusterService, null, logger, threadPool.getThreadContext()); DiscoveryNode masterNode = currentState.nodes().getMasterNode(); Predicate changePredicate = MasterNodeChangePredicate.build(currentState); if (masterNode == null) { @@ -121,8 +126,11 @@ public void handleException(TransportException exp) { if (isMasterChannelException(exp)) { waitForNewMasterAndRetry(actionName, observer, request, listener, changePredicate); } else { - logger.warn(new ParameterizedMessage("unexpected failure while sending request [{}] to [{}] for shard entry [{}]", actionName, masterNode, request), exp); - listener.onFailure(exp instanceof RemoteTransportException ? (Exception) (exp.getCause() instanceof Exception ? exp.getCause() : new ElasticsearchException(exp.getCause())) : exp); + logger.warn(new ParameterizedMessage("unexpected failure while sending request [{}]" + + " to [{}] for shard entry [{}]", actionName, masterNode, request), exp); + listener.onFailure(exp instanceof RemoteTransportException ? + (Exception) (exp.getCause() instanceof Exception ? exp.getCause() : + new ElasticsearchException(exp.getCause())) : exp); } } }); @@ -152,7 +160,8 @@ private static boolean isMasterChannelException(TransportException exp) { * @param failure the underlying cause of the failure * @param listener callback upon completion of the request */ - public void remoteShardFailed(final ShardId shardId, String allocationId, long primaryTerm, boolean markAsStale, final String message, @Nullable final Exception failure, Listener listener) { + public void remoteShardFailed(final ShardId shardId, String allocationId, long primaryTerm, boolean markAsStale, final String message, + @Nullable final Exception failure, Listener listener) { assert primaryTerm > 0L : "primary term should be strictly positive"; final FailedShardEntry shardEntry = new FailedShardEntry(shardId, allocationId, primaryTerm, message, failure, markAsStale); final CompositeListener compositeListener = new CompositeListener(listener); @@ -188,21 +197,24 @@ int remoteShardFailedCacheSize() { /** * Send a shard failed request to the master node to update the cluster state when a shard on the local node failed. 
*/ - public void localShardFailed(final ShardRouting shardRouting, final String message, @Nullable final Exception failure, Listener listener) { + public void localShardFailed(final ShardRouting shardRouting, final String message, + @Nullable final Exception failure, Listener listener) { localShardFailed(shardRouting, message, failure, listener, clusterService.state()); } /** * Send a shard failed request to the master node to update the cluster state when a shard on the local node failed. */ - public void localShardFailed(final ShardRouting shardRouting, final String message, @Nullable final Exception failure, Listener listener, - final ClusterState currentState) { - FailedShardEntry shardEntry = new FailedShardEntry(shardRouting.shardId(), shardRouting.allocationId().getId(), 0L, message, failure, true); + public void localShardFailed(final ShardRouting shardRouting, final String message, @Nullable final Exception failure, + Listener listener, final ClusterState currentState) { + FailedShardEntry shardEntry = new FailedShardEntry(shardRouting.shardId(), shardRouting.allocationId().getId(), + 0L, message, failure, true); sendShardAction(SHARD_FAILED_ACTION_NAME, currentState, shardEntry, listener); } // visible for testing - protected void waitForNewMasterAndRetry(String actionName, ClusterStateObserver observer, TransportRequest request, Listener listener, Predicate changePredicate) { + protected void waitForNewMasterAndRetry(String actionName, ClusterStateObserver observer, + TransportRequest request, Listener listener, Predicate changePredicate) { observer.waitForNextChange(new ClusterStateObserver.Listener() { @Override public void onNewClusterState(ClusterState state) { @@ -231,7 +243,8 @@ private static class ShardFailedTransportHandler implements TransportRequestHand private final ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor; private final Logger logger; - ShardFailedTransportHandler(ClusterService clusterService, ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor, Logger logger) { + ShardFailedTransportHandler(ClusterService clusterService, + ShardFailedClusterStateTaskExecutor shardFailedClusterStateTaskExecutor, Logger logger) { this.clusterService = clusterService; this.shardFailedClusterStateTaskExecutor = shardFailedClusterStateTaskExecutor; this.logger = logger; @@ -239,7 +252,8 @@ private static class ShardFailedTransportHandler implements TransportRequestHand @Override public void messageReceived(FailedShardEntry request, TransportChannel channel, Task task) throws Exception { - logger.debug(() -> new ParameterizedMessage("{} received shard failed for {}", request.shardId, request), request.failure); + logger.debug(() -> new ParameterizedMessage("{} received shard failed for {}", + request.shardId, request), request.failure); clusterService.submitStateUpdateTask( "shard-failed", request, @@ -248,12 +262,15 @@ public void messageReceived(FailedShardEntry request, TransportChannel channel, new ClusterStateTaskListener() { @Override public void onFailure(String source, Exception e) { - logger.error(() -> new ParameterizedMessage("{} unexpected failure while failing shard [{}]", request.shardId, request), e); + logger.error(() -> new ParameterizedMessage("{} unexpected failure while failing shard [{}]", + request.shardId, request), e); try { channel.sendResponse(e); } catch (Exception channelException) { channelException.addSuppressed(e); - logger.warn(() -> new ParameterizedMessage("{} failed to send failure [{}] while failing 
shard [{}]", request.shardId, e, request), channelException); + logger.warn(() -> + new ParameterizedMessage("{} failed to send failure [{}] while failing shard [{}]", + request.shardId, e, request), channelException); } } @@ -263,7 +280,9 @@ public void onNoLongerMaster(String source) { try { channel.sendResponse(new NotMasterException(source)); } catch (Exception channelException) { - logger.warn(() -> new ParameterizedMessage("{} failed to send no longer master while failing shard [{}]", request.shardId, request), channelException); + logger.warn(() -> + new ParameterizedMessage("{} failed to send no longer master while failing shard [{}]", + request.shardId, request), channelException); } } @@ -272,7 +291,9 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS try { channel.sendResponse(TransportResponse.Empty.INSTANCE); } catch (Exception channelException) { - logger.warn(() -> new ParameterizedMessage("{} failed to send response while failing shard [{}]", request.shardId, request), channelException); + logger.warn(() -> + new ParameterizedMessage("{} failed to send response while failing shard [{}]", + request.shardId, request), channelException); } } } @@ -302,7 +323,8 @@ public ClusterTasksResult execute(ClusterState currentState, L IndexMetaData indexMetaData = currentState.metaData().index(task.shardId.getIndex()); if (indexMetaData == null) { // tasks that correspond to non-existent indices are marked as successful - logger.debug("{} ignoring shard failed task [{}] (unknown index {})", task.shardId, task, task.shardId.getIndex()); + logger.debug("{} ignoring shard failed task [{}] (unknown index {})", + task.shardId, task, task.shardId.getIndex()); batchResultBuilder.success(task); } else { // The primary term is 0 if the shard failed itself. It is > 0 if a write was done on a primary but was failed to be @@ -334,7 +356,8 @@ public ClusterTasksResult execute(ClusterState currentState, L // they were failed is because a write made it into the primary but not to this copy (which corresponds to // the check "primaryTerm > 0"). 
if (task.primaryTerm > 0 && inSyncAllocationIds.contains(task.allocationId)) { - logger.debug("{} marking shard {} as stale (shard failed task: [{}])", task.shardId, task.allocationId, task); + logger.debug("{} marking shard {} as stale (shard failed task: [{}])", + task.shardId, task.allocationId, task); tasksToBeApplied.add(task); staleShardsToBeApplied.add(new StaleShard(task.shardId, task.allocationId)); } else { @@ -406,7 +429,8 @@ public static class FailedShardEntry extends TransportRequest { } } - public FailedShardEntry(ShardId shardId, String allocationId, long primaryTerm, String message, Exception failure, boolean markAsStale) { + public FailedShardEntry(ShardId shardId, String allocationId, long primaryTerm, + String message, Exception failure, boolean markAsStale) { this.shardId = shardId; this.allocationId = allocationId; this.primaryTerm = primaryTerm; @@ -481,7 +505,8 @@ private static class ShardStartedTransportHandler implements TransportRequestHan private final ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor; private final Logger logger; - ShardStartedTransportHandler(ClusterService clusterService, ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor, Logger logger) { + ShardStartedTransportHandler(ClusterService clusterService, + ShardStartedClusterStateTaskExecutor shardStartedClusterStateTaskExecutor, Logger logger) { this.clusterService = clusterService; this.shardStartedClusterStateTaskExecutor = shardStartedClusterStateTaskExecutor; this.logger = logger; @@ -500,7 +525,8 @@ public void messageReceived(StartedShardEntry request, TransportChannel channel, } } - public static class ShardStartedClusterStateTaskExecutor implements ClusterStateTaskExecutor, ClusterStateTaskListener { + public static class ShardStartedClusterStateTaskExecutor + implements ClusterStateTaskExecutor, ClusterStateTaskListener { private final AllocationService allocationService; private final Logger logger; @@ -528,13 +554,14 @@ public ClusterTasksResult execute(ClusterState currentState, if (matched.initializing() == false) { assert matched.active() : "expected active shard routing for task " + task + " but found " + matched; // same as above, this might have been a stale in-flight request, so we just ignore. 
- logger.debug("{} ignoring shard started task [{}] (shard exists but is not initializing: {})", task.shardId, task, - matched); + logger.debug("{} ignoring shard started task [{}] (shard exists but is not initializing: {})", + task.shardId, task, matched); builder.success(task); } else { // remove duplicate actions as allocation service expects a clean list without duplicates if (seenShardRoutings.contains(matched)) { - logger.trace("{} ignoring shard started task [{}] (already scheduled to start {})", task.shardId, task, matched); + logger.trace("{} ignoring shard started task [{}] (already scheduled to start {})", + task.shardId, task, matched); tasksToBeApplied.add(task); } else { logger.debug("{} starting shard {} (shard started task: [{}])", task.shardId, matched, task); diff --git a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java index fc09741f4d9c2..fafd397722025 100644 --- a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java +++ b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlock.java @@ -50,8 +50,8 @@ public class ClusterBlock implements Streamable, ToXContentFragment { ClusterBlock() { } - public ClusterBlock(int id, String description, boolean retryable, boolean disableStatePersistence, boolean allowReleaseResources, RestStatus status, - EnumSet levels) { + public ClusterBlock(int id, String description, boolean retryable, boolean disableStatePersistence, boolean allowReleaseResources, + RestStatus status, EnumSet levels) { this.id = id; this.description = description; this.retryable = retryable; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasOrIndex.java b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasOrIndex.java index 497dc49198bfc..dd11175395114 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/AliasOrIndex.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/AliasOrIndex.java @@ -41,7 +41,8 @@ public interface AliasOrIndex { boolean isAlias(); /** - * @return All {@link IndexMetaData} of all concrete indices this alias is referring to or if this is a concrete index its {@link IndexMetaData} + * @return All {@link IndexMetaData} of all concrete indices this alias is referring to + * or if this is a concrete index its {@link IndexMetaData} */ List getIndices(); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java b/server/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java index 789b01c0cfa9d..c336549958a1e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/AutoExpandReplicas.java @@ -51,13 +51,15 @@ private static AutoExpandReplicas parse(String value) { } final int dash = value.indexOf('-'); if (-1 == dash) { - throw new IllegalArgumentException("failed to parse [" + IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS + "] from value: [" + value + "] at index " + dash); + throw new IllegalArgumentException("failed to parse [" + IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS + + "] from value: [" + value + "] at index " + dash); } final String sMin = value.substring(0, dash); try { min = Integer.parseInt(sMin); } catch (NumberFormatException e) { - throw new IllegalArgumentException("failed to parse [" + IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS + "] from value: [" + value + "] at index " + dash, e); + throw new 
IllegalArgumentException("failed to parse [" + IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS + + "] from value: [" + value + "] at index " + dash, e); } String sMax = value.substring(dash + 1); if (sMax.equals(ALL_NODES_VALUE)) { @@ -66,7 +68,8 @@ private static AutoExpandReplicas parse(String value) { try { max = Integer.parseInt(sMax); } catch (NumberFormatException e) { - throw new IllegalArgumentException("failed to parse [" + IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS + "] from value: [" + value + "] at index " + dash, e); + throw new IllegalArgumentException("failed to parse [" + IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS + + "] from value: [" + value + "] at index " + dash, e); } } return new AutoExpandReplicas(min, max, true); @@ -78,7 +81,8 @@ private static AutoExpandReplicas parse(String value) { private AutoExpandReplicas(int minReplicas, int maxReplicas, boolean enabled) { if (minReplicas > maxReplicas) { - throw new IllegalArgumentException("[" + IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS + "] minReplicas must be =< maxReplicas but wasn't " + minReplicas + " > " + maxReplicas); + throw new IllegalArgumentException("[" + IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS + + "] minReplicas must be =< maxReplicas but wasn't " + minReplicas + " > " + maxReplicas); } this.minReplicas = minReplicas; this.maxReplicas = maxReplicas; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index b4a7733c62eed..b1b092e008679 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -80,11 +80,21 @@ public class IndexMetaData implements Diffable, ToXContentFragment { - public static final ClusterBlock INDEX_READ_ONLY_BLOCK = new ClusterBlock(5, "index read-only (api)", false, false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); - public static final ClusterBlock INDEX_READ_BLOCK = new ClusterBlock(7, "index read (api)", false, false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.READ)); - public static final ClusterBlock INDEX_WRITE_BLOCK = new ClusterBlock(8, "index write (api)", false, false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE)); - public static final ClusterBlock INDEX_METADATA_BLOCK = new ClusterBlock(9, "index metadata (api)", false, false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.METADATA_WRITE, ClusterBlockLevel.METADATA_READ)); - public static final ClusterBlock INDEX_READ_ONLY_ALLOW_DELETE_BLOCK = new ClusterBlock(12, "index read-only / allow delete (api)", false, false, true, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.METADATA_WRITE, ClusterBlockLevel.WRITE)); + public static final ClusterBlock INDEX_READ_ONLY_BLOCK = + new ClusterBlock(5, "index read-only (api)", false, false, false, + RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); + public static final ClusterBlock INDEX_READ_BLOCK = + new ClusterBlock(7, "index read (api)", false, false, false, + RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.READ)); + public static final ClusterBlock INDEX_WRITE_BLOCK = + new ClusterBlock(8, "index write (api)", false, false, false, + RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE)); + public static final ClusterBlock INDEX_METADATA_BLOCK = + new ClusterBlock(9, "index metadata (api)", false, false, 
false, + RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.METADATA_WRITE, ClusterBlockLevel.METADATA_READ)); + public static final ClusterBlock INDEX_READ_ONLY_ALLOW_DELETE_BLOCK = + new ClusterBlock(12, "index read-only / allow delete (api)", false, false, + true, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.METADATA_WRITE, ClusterBlockLevel.WRITE)); public enum State { OPEN((byte) 0), @@ -122,9 +132,9 @@ public static State fromString(String state) { static Setting buildNumberOfShardsSetting() { /* This is a safety limit that should only be exceeded in very rare and special cases. The assumption is that * 99% of the users have less than 1024 shards per index. We also make it a hard check that requires restart of nodes - * if a cluster should allow to create more than 1024 shards per index. NOTE: this does not limit the number of shards per cluster. - * this also prevents creating stuff like a new index with millions of shards by accident which essentially kills the entire cluster - * with OOM on the spot.*/ + * if a cluster should allow to create more than 1024 shards per index. NOTE: this does not limit the number of shards + * per cluster. this also prevents creating stuff like a new index with millions of shards by accident which essentially + * kills the entire cluster with OOM on the spot.*/ final int maxNumShards = Integer.parseInt(System.getProperty("es.index.max_number_of_shards", "1024")); if (maxNumShards < 1) { throw new IllegalArgumentException("es.index.max_number_of_shards must be > 0"); @@ -144,7 +154,8 @@ static Setting buildNumberOfShardsSetting() { Setting.intSetting(SETTING_ROUTING_PARTITION_SIZE, 1, 1, Property.IndexScope); public static final Setting INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING = - Setting.intSetting("index.number_of_routing_shards", INDEX_NUMBER_OF_SHARDS_SETTING, 1, new Setting.Validator() { + Setting.intSetting("index.number_of_routing_shards", INDEX_NUMBER_OF_SHARDS_SETTING, + 1, new Setting.Validator() { @Override public void validate(Integer numRoutingShards, Map, Integer> settings) { Integer numShards = settings.get(INDEX_NUMBER_OF_SHARDS_SETTING); @@ -294,12 +305,15 @@ public Iterator> settings() { private final ActiveShardCount waitForActiveShards; private final ImmutableOpenMap rolloverInfos; - private IndexMetaData(Index index, long version, long mappingVersion, long settingsVersion, long[] primaryTerms, State state, int numberOfShards, int numberOfReplicas, Settings settings, + private IndexMetaData(Index index, long version, long mappingVersion, long settingsVersion, long[] primaryTerms, State state, + int numberOfShards, int numberOfReplicas, Settings settings, ImmutableOpenMap mappings, ImmutableOpenMap aliases, ImmutableOpenMap customData, ImmutableOpenIntMap> inSyncAllocationIds, - DiscoveryNodeFilters requireFilters, DiscoveryNodeFilters initialRecoveryFilters, DiscoveryNodeFilters includeFilters, DiscoveryNodeFilters excludeFilters, + DiscoveryNodeFilters requireFilters, DiscoveryNodeFilters initialRecoveryFilters, + DiscoveryNodeFilters includeFilters, DiscoveryNodeFilters excludeFilters, Version indexCreatedVersion, Version indexUpgradedVersion, - int routingNumShards, int routingPartitionSize, ActiveShardCount waitForActiveShards, ImmutableOpenMap rolloverInfos) { + int routingNumShards, int routingPartitionSize, ActiveShardCount waitForActiveShards, + ImmutableOpenMap rolloverInfos) { this.index = index; this.version = version; @@ -1161,9 +1175,11 @@ public IndexMetaData build() { final String uuid = 
settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE); - return new IndexMetaData(new Index(index, uuid), version, mappingVersion, settingsVersion, primaryTerms, state, numberOfShards, numberOfReplicas, tmpSettings, mappings.build(), - tmpAliases.build(), customMetaData.build(), filledInSyncAllocationIds.build(), requireFilters, initialRecoveryFilters, includeFilters, excludeFilters, - indexCreatedVersion, indexUpgradedVersion, getRoutingNumShards(), routingPartitionSize, waitForActiveShards, rolloverInfos.build()); + return new IndexMetaData(new Index(index, uuid), version, mappingVersion, settingsVersion, primaryTerms, state, + numberOfShards, numberOfReplicas, tmpSettings, mappings.build(), tmpAliases.build(), customMetaData.build(), + filledInSyncAllocationIds.build(), requireFilters, initialRecoveryFilters, includeFilters, excludeFilters, + indexCreatedVersion, indexUpgradedVersion, getRoutingNumShards(), routingPartitionSize, waitForActiveShards, + rolloverInfos.build()); } public static void toXContent(IndexMetaData indexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException { @@ -1258,7 +1274,8 @@ public static IndexMetaData fromXContent(XContentParser parser) throws IOExcepti currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_OBJECT) { String mappingType = currentFieldName; - Map mappingSource = MapBuilder.newMapBuilder().put(mappingType, parser.mapOrdered()).map(); + Map mappingSource = + MapBuilder.newMapBuilder().put(mappingType, parser.mapOrdered()).map(); builder.putMapping(new MappingMetaData(mappingType, mappingSource)); } else { throw new IllegalArgumentException("Unexpected token: " + token); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index 9be5acc0561d9..66bd86b7961ce 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -260,7 +260,8 @@ public Index concreteSingleIndex(ClusterState state, IndicesRequest request) { String indexExpression = request.indices() != null && request.indices().length > 0 ? request.indices()[0] : null; Index[] indices = concreteIndices(state, request.indicesOptions(), indexExpression); if (indices.length != 1) { - throw new IllegalArgumentException("unable to return a single index as the index and options provided got resolved to multiple indices"); + throw new IllegalArgumentException("unable to return a single index as the index and options" + + " provided got resolved to multiple indices"); } return indices[0]; } @@ -797,7 +798,8 @@ private static Set expand(Context context, IndexMetaData.State excludeSt } private boolean isEmptyOrTrivialWildcard(List expressions) { - return expressions.isEmpty() || (expressions.size() == 1 && (MetaData.ALL.equals(expressions.get(0)) || Regex.isMatchAllPattern(expressions.get(0)))); + return expressions.isEmpty() || (expressions.size() == 1 && (MetaData.ALL.equals(expressions.get(0)) || + Regex.isMatchAllPattern(expressions.get(0)))); } private static List resolveEmptyOrTrivialWildcard(IndicesOptions options, MetaData metaData) { @@ -871,7 +873,8 @@ String resolveExpression(String expression, final Context context) { inDateFormat = true; inPlaceHolderSb.append(c); } else { - throw new ElasticsearchParseException("invalid dynamic name expression [{}]. 
invalid character in placeholder at position [{}]", new String(text, from, length), i); + throw new ElasticsearchParseException("invalid dynamic name expression [{}]." + + " invalid character in placeholder at position [{}]", new String(text, from, length), i); } break; @@ -894,19 +897,22 @@ String resolveExpression(String expression, final Context context) { timeZone = ZoneOffset.UTC; } else { if (inPlaceHolderString.lastIndexOf(RIGHT_BOUND) != inPlaceHolderString.length() - 1) { - throw new ElasticsearchParseException("invalid dynamic name expression [{}]. missing closing `}` for date math format", inPlaceHolderString); + throw new ElasticsearchParseException("invalid dynamic name expression [{}]. missing closing `}`" + + " for date math format", inPlaceHolderString); } if (dateTimeFormatLeftBoundIndex == inPlaceHolderString.length() - 2) { - throw new ElasticsearchParseException("invalid dynamic name expression [{}]. missing date format", inPlaceHolderString); + throw new ElasticsearchParseException("invalid dynamic name expression [{}]. missing date format", + inPlaceHolderString); } mathExpression = inPlaceHolderString.substring(0, dateTimeFormatLeftBoundIndex); - String dateFormatterPatternAndTimeZoneId = inPlaceHolderString.substring(dateTimeFormatLeftBoundIndex + 1, inPlaceHolderString.length() - 1); - int formatPatternTimeZoneSeparatorIndex = dateFormatterPatternAndTimeZoneId.indexOf(TIME_ZONE_BOUND); + String patternAndTZid = + inPlaceHolderString.substring(dateTimeFormatLeftBoundIndex + 1, inPlaceHolderString.length() - 1); + int formatPatternTimeZoneSeparatorIndex = patternAndTZid.indexOf(TIME_ZONE_BOUND); if (formatPatternTimeZoneSeparatorIndex != -1) { - dateFormatterPattern = dateFormatterPatternAndTimeZoneId.substring(0, formatPatternTimeZoneSeparatorIndex); - timeZone = ZoneId.of(dateFormatterPatternAndTimeZoneId.substring(formatPatternTimeZoneSeparatorIndex + 1)); + dateFormatterPattern = patternAndTZid.substring(0, formatPatternTimeZoneSeparatorIndex); + timeZone = ZoneId.of(patternAndTZid.substring(formatPatternTimeZoneSeparatorIndex + 1)); } else { - dateFormatterPattern = dateFormatterPatternAndTimeZoneId; + dateFormatterPattern = patternAndTZid; timeZone = ZoneOffset.UTC; } dateFormatter = DateFormatters.forPattern(dateFormatterPattern); @@ -937,8 +943,10 @@ String resolveExpression(String expression, final Context context) { case RIGHT_BOUND: if (!escapedChar) { - throw new ElasticsearchParseException("invalid dynamic name expression [{}]. invalid character at position [{}]. " + - "`{` and `}` are reserved characters and should be escaped when used as part of the index name using `\\` (e.g. `\\{text\\}`)", new String(text, from, length), i); + throw new ElasticsearchParseException("invalid dynamic name expression [{}]." + + " invalid character at position [{}]. `{` and `}` are reserved characters and" + + " should be escaped when used as part of the index name using `\\` (e.g. `\\{text\\}`)", + new String(text, from, length), i); } default: beforePlaceHolderSb.append(c); @@ -947,7 +955,8 @@ String resolveExpression(String expression, final Context context) { } if (inPlaceHolder) { - throw new ElasticsearchParseException("invalid dynamic name expression [{}]. date math placeholder is open ended", new String(text, from, length)); + throw new ElasticsearchParseException("invalid dynamic name expression [{}]. 
date math placeholder is open ended", + new String(text, from, length)); } if (beforePlaceHolderSb.length() == 0) { throw new ElasticsearchParseException("nothing captured"); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index 19c3de722793a..acd28a55604d3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -132,14 +132,17 @@ public interface Custom extends NamedDiffable, ToXContentFragment, Clust public static final Setting SETTING_READ_ONLY_SETTING = Setting.boolSetting("cluster.blocks.read_only", false, Property.Dynamic, Property.NodeScope); - public static final ClusterBlock CLUSTER_READ_ONLY_BLOCK = new ClusterBlock(6, "cluster read-only (api)", false, false, - false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); + public static final ClusterBlock CLUSTER_READ_ONLY_BLOCK = new ClusterBlock(6, "cluster read-only (api)", + false, false, false, RestStatus.FORBIDDEN, + EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); public static final Setting SETTING_READ_ONLY_ALLOW_DELETE_SETTING = Setting.boolSetting("cluster.blocks.read_only_allow_delete", false, Property.Dynamic, Property.NodeScope); - public static final ClusterBlock CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK = new ClusterBlock(13, "cluster read-only / allow delete (api)", - false, false, true, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); + public static final ClusterBlock CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK = + new ClusterBlock(13, "cluster read-only / allow delete (api)", + false, false, true, RestStatus.FORBIDDEN, + EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); public static final MetaData EMPTY_META_DATA = builder().build(); @@ -575,11 +578,13 @@ public String resolveIndexRouting(@Nullable String routing, String aliasOrIndex) AliasMetaData aliasMd = alias.getFirstAliasMetaData(); if (aliasMd.indexRouting() != null) { if (aliasMd.indexRouting().indexOf(',') != -1) { - throw new IllegalArgumentException("index/alias [" + aliasOrIndex + "] provided with routing value [" + aliasMd.getIndexRouting() + "] that resolved to several routing values, rejecting operation"); + throw new IllegalArgumentException("index/alias [" + aliasOrIndex + "] provided with routing value [" + + aliasMd.getIndexRouting() + "] that resolved to several routing values, rejecting operation"); } if (routing != null) { if (!routing.equals(aliasMd.indexRouting())) { - throw new IllegalArgumentException("Alias [" + aliasOrIndex + "] has index routing associated with it [" + aliasMd.indexRouting() + "], and was provided with routing value [" + routing + "], rejecting operation"); + throw new IllegalArgumentException("Alias [" + aliasOrIndex + "] has index routing associated with it [" + + aliasMd.indexRouting() + "], and was provided with routing value [" + routing + "], rejecting operation"); } } // Alias routing overrides the parent routing (if any). 
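Side note on the MetaData resolveIndexRouting hunk above: the wrapped exception messages encode two rules, namely that an alias-level index routing must resolve to a single value, and that it must agree with any routing supplied on the request, after which the alias routing overrides the parent routing. The following stand-alone sketch is not the actual Elasticsearch implementation; it uses a simplified signature with plain String parameters instead of the real AliasMetaData lookup against cluster metadata, purely to illustrate the checks being re-wrapped in this hunk.

// Simplified sketch of the routing-conflict rules shown in the hunk above.
// The real method resolves aliasOrIndex against the cluster metadata and reads
// the routing from AliasMetaData; that lookup is omitted here for brevity.
public final class AliasRoutingSketch {

    static String resolveIndexRouting(String requestRouting, String aliasName, String aliasIndexRouting) {
        if (aliasIndexRouting == null) {
            return requestRouting; // no alias-level routing, keep whatever the request provided
        }
        if (aliasIndexRouting.indexOf(',') != -1) {
            // an alias routing used for a single-document operation must resolve to one value
            throw new IllegalArgumentException("index/alias [" + aliasName + "] provided with routing value ["
                + aliasIndexRouting + "] that resolved to several routing values, rejecting operation");
        }
        if (requestRouting != null && requestRouting.equals(aliasIndexRouting) == false) {
            // an explicit request routing must not contradict the alias routing
            throw new IllegalArgumentException("Alias [" + aliasName + "] has index routing associated with it ["
                + aliasIndexRouting + "], and was provided with routing value [" + requestRouting
                + "], rejecting operation");
        }
        return aliasIndexRouting; // alias routing overrides the parent routing (if any)
    }

    public static void main(String[] args) {
        System.out.println(resolveIndexRouting(null, "logs-write", "5")); // prints 5
        System.out.println(resolveIndexRouting("5", "logs-write", "5"));  // prints 5
        // resolveIndexRouting("7", "logs-write", "5") would throw IllegalArgumentException
    }
}

Under these rules the alias routing wins whenever the two values are consistent, which matches the "Alias routing overrides the parent routing" comment retained as context in the hunk above.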
@@ -594,7 +599,8 @@ private void rejectSingleIndexOperation(String aliasOrIndex, AliasOrIndex result for (IndexMetaData indexMetaData : result.getIndices()) { indexNames[i++] = indexMetaData.getIndex().getName(); } - throw new IllegalArgumentException("Alias [" + aliasOrIndex + "] has more than one index associated with it [" + Arrays.toString(indexNames) + "], can't execute a single index op"); + throw new IllegalArgumentException("Alias [" + aliasOrIndex + "] has more than one index associated with it [" + + Arrays.toString(indexNames) + "], can't execute a single index op"); } public boolean hasIndex(String index) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 47144047f6b3a..d0199e838438e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -338,7 +338,8 @@ public ClusterState execute(ClusterState currentState) throws Exception { continue; } - //Allow templatesAliases to be templated by replacing a token with the name of the index that we are applying it to + // Allow templatesAliases to be templated by replacing a token with the + // name of the index that we are applying it to if (aliasMetaData.alias().contains("{index}")) { String templatedAlias = aliasMetaData.alias().replace("{index}", request.index()); aliasMetaData = AliasMetaData.newAliasMetaData(aliasMetaData, templatedAlias); @@ -468,7 +469,8 @@ public ClusterState execute(ClusterState currentState) throws Exception { // the context is only used for validation so it's fine to pass fake values for the shard id and the current // timestamp - final QueryShardContext queryShardContext = indexService.newQueryShardContext(0, null, () -> 0L, null); + final QueryShardContext queryShardContext = + indexService.newQueryShardContext(0, null, () -> 0L, null); for (Alias alias : request.aliases()) { if (Strings.hasLength(alias.filter())) { @@ -484,7 +486,8 @@ public ClusterState execute(ClusterState currentState) throws Exception { // now, update the mappings with the actual source Map mappingsMetaData = new HashMap<>(); - for (DocumentMapper mapper : Arrays.asList(mapperService.documentMapper(), mapperService.documentMapper(MapperService.DEFAULT_MAPPING))) { + for (DocumentMapper mapper : Arrays.asList(mapperService.documentMapper(), + mapperService.documentMapper(MapperService.DEFAULT_MAPPING))) { if (mapper != null) { MappingMetaData mappingMd = new MappingMetaData(mapper); mappingsMetaData.put(mapper.type(), mappingMd); @@ -632,7 +635,8 @@ List getIndexSettingsValidationErrors(final Settings settings, final boo } else if (Strings.isEmpty(customPath) == false) { Path resolvedPath = PathUtils.get(new Path[]{env.sharedDataFile()}, customPath); if (resolvedPath == null) { - validationErrors.add("custom path [" + customPath + "] is not a sub-path of path.shared_data [" + env.sharedDataFile() + "]"); + validationErrors.add("custom path [" + customPath + + "] is not a sub-path of path.shared_data [" + env.sharedDataFile() + "]"); } } if (forbidPrivateIndexSettings) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java index 97ec26ded674c..6badfd6ef586a 100644 --- 
a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java @@ -64,7 +64,8 @@ public class MetaDataIndexStateService extends AbstractComponent { private static final Logger logger = LogManager.getLogger(MetaDataIndexStateService.class); private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); - public static final ClusterBlock INDEX_CLOSED_BLOCK = new ClusterBlock(4, "index closed", false, false, false, RestStatus.FORBIDDEN, ClusterBlockLevel.READ_WRITE); + public static final ClusterBlock INDEX_CLOSED_BLOCK = new ClusterBlock(4, "index closed", false, + false, false, RestStatus.FORBIDDEN, ClusterBlockLevel.READ_WRITE); private final ClusterService clusterService; @@ -92,7 +93,8 @@ public void closeIndex(final CloseIndexClusterStateUpdateRequest request, final } final String indicesAsString = Arrays.toString(request.indices()); - clusterService.submitStateUpdateTask("close-indices " + indicesAsString, new AckedClusterStateUpdateTask(Priority.URGENT, request, listener) { + clusterService.submitStateUpdateTask("close-indices " + indicesAsString, + new AckedClusterStateUpdateTask(Priority.URGENT, request, listener) { @Override protected ClusterStateUpdateResponse newResponse(boolean acknowledged) { return new ClusterStateUpdateResponse(acknowledged); @@ -142,7 +144,8 @@ public ClusterState execute(ClusterState currentState) { }); } - public void openIndex(final OpenIndexClusterStateUpdateRequest request, final ActionListener listener) { + public void openIndex(final OpenIndexClusterStateUpdateRequest request, + final ActionListener listener) { onlyOpenIndex(request, ActionListener.wrap(response -> { if (response.isAcknowledged()) { String[] indexNames = Arrays.stream(request.indices()).map(Index::getName).toArray(String[]::new); @@ -160,13 +163,15 @@ public void openIndex(final OpenIndexClusterStateUpdateRequest request, final Ac }, listener::onFailure)); } - private void onlyOpenIndex(final OpenIndexClusterStateUpdateRequest request, final ActionListener listener) { + private void onlyOpenIndex(final OpenIndexClusterStateUpdateRequest request, + final ActionListener listener) { if (request.indices() == null || request.indices().length == 0) { throw new IllegalArgumentException("Index name is required"); } final String indicesAsString = Arrays.toString(request.indices()); - clusterService.submitStateUpdateTask("open-indices " + indicesAsString, new AckedClusterStateUpdateTask(Priority.URGENT, request, listener) { + clusterService.submitStateUpdateTask("open-indices " + indicesAsString, + new AckedClusterStateUpdateTask(Priority.URGENT, request, listener) { @Override protected ClusterStateUpdateResponse newResponse(boolean acknowledged) { return new ClusterStateUpdateResponse(acknowledged); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java index 3d87990797699..9bc324a42482a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java @@ -186,7 +186,8 @@ public Set> entrySet() { return Collections.emptySet(); } }; - try (IndexAnalyzers fakeIndexAnalzyers = new IndexAnalyzers(indexSettings, fakeDefault, fakeDefault, fakeDefault, analyzerMap, analyzerMap, analyzerMap)) { 
+ try (IndexAnalyzers fakeIndexAnalzyers = + new IndexAnalyzers(indexSettings, fakeDefault, fakeDefault, fakeDefault, analyzerMap, analyzerMap, analyzerMap)) { MapperService mapperService = new MapperService(indexSettings, fakeIndexAnalzyers, xContentRegistry, similarityService, mapperRegistry, () -> null); mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY); @@ -201,7 +202,8 @@ public Set> entrySet() { * Marks index as upgraded so we don't have to test it again */ private IndexMetaData markAsUpgraded(IndexMetaData indexMetaData) { - Settings settings = Settings.builder().put(indexMetaData.getSettings()).put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.CURRENT).build(); + Settings settings = Settings.builder().put(indexMetaData.getSettings()) + .put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.CURRENT).build(); return IndexMetaData.builder(indexMetaData).settings(settings).build(); } @@ -209,8 +211,10 @@ IndexMetaData archiveBrokenIndexSettings(IndexMetaData indexMetaData) { final Settings settings = indexMetaData.getSettings(); final Settings upgrade = indexScopedSettings.archiveUnknownOrInvalidSettings( settings, - e -> logger.warn("{} ignoring unknown index setting: [{}] with value [{}]; archiving", indexMetaData.getIndex(), e.getKey(), e.getValue()), - (e, ex) -> logger.warn(() -> new ParameterizedMessage("{} ignoring invalid index setting: [{}] with value [{}]; archiving", indexMetaData.getIndex(), e.getKey(), e.getValue()), ex)); + e -> logger.warn("{} ignoring unknown index setting: [{}] with value [{}]; archiving", + indexMetaData.getIndex(), e.getKey(), e.getValue()), + (e, ex) -> logger.warn(() -> new ParameterizedMessage("{} ignoring invalid index setting: [{}] with value [{}]; archiving", + indexMetaData.getIndex(), e.getKey(), e.getValue()), ex)); if (upgrade != settings) { return IndexMetaData.builder(indexMetaData).settings(upgrade).build(); } else { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index 616fd13d1fadc..3cb8ea9e91d12 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -175,7 +175,8 @@ private boolean refreshIndexMapping(IndexService indexService, IndexMetaData.Bui try { List updatedTypes = new ArrayList<>(); MapperService mapperService = indexService.mapperService(); - for (DocumentMapper mapper : Arrays.asList(mapperService.documentMapper(), mapperService.documentMapper(MapperService.DEFAULT_MAPPING))) { + for (DocumentMapper mapper : Arrays.asList(mapperService.documentMapper(), + mapperService.documentMapper(MapperService.DEFAULT_MAPPING))) { if (mapper != null) { final String type = mapper.type(); if (!mapper.mappingSource().equals(builder.mapping(type).source())) { @@ -188,7 +189,8 @@ private boolean refreshIndexMapping(IndexService indexService, IndexMetaData.Bui if (updatedTypes.isEmpty() == false) { logger.warn("[{}] re-syncing mappings with cluster state because of types [{}]", index, updatedTypes); dirty = true; - for (DocumentMapper mapper : Arrays.asList(mapperService.documentMapper(), mapperService.documentMapper(MapperService.DEFAULT_MAPPING))) { + for (DocumentMapper mapper : Arrays.asList(mapperService.documentMapper(), + mapperService.documentMapper(MapperService.DEFAULT_MAPPING))) { if (mapper != null) { builder.putMapping(new 
MappingMetaData(mapper)); } @@ -215,8 +217,8 @@ public void refreshMapping(final String index, final String indexUUID) { class PutMappingExecutor implements ClusterStateTaskExecutor { @Override - public ClusterTasksResult execute(ClusterState currentState, - List tasks) throws Exception { + public ClusterTasksResult + execute(ClusterState currentState, List tasks) throws Exception { Map indexMapperServices = new HashMap<>(); ClusterTasksResult.Builder builder = ClusterTasksResult.builder(); try { @@ -325,7 +327,8 @@ private ClusterState applyRequest(ClusterState currentState, PutMappingClusterSt IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(indexMetaData); // Mapping updates on a single type may have side-effects on other types so we need to // update mapping metadata on all types - for (DocumentMapper mapper : Arrays.asList(mapperService.documentMapper(), mapperService.documentMapper(MapperService.DEFAULT_MAPPING))) { + for (DocumentMapper mapper : Arrays.asList(mapperService.documentMapper(), + mapperService.documentMapper(MapperService.DEFAULT_MAPPING))) { if (mapper != null) { indexMetaDataBuilder.putMapping(new MappingMetaData(mapper.mappingSource())); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java index 55a6ed3893d8c..b2bc082f2846b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java @@ -85,8 +85,10 @@ public MetaDataUpdateSettingsService(Settings settings, ClusterService clusterSe this.indicesService = indicesService; } - public void updateSettings(final UpdateSettingsClusterStateUpdateRequest request, final ActionListener listener) { - final Settings normalizedSettings = Settings.builder().put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX).build(); + public void updateSettings(final UpdateSettingsClusterStateUpdateRequest request, + final ActionListener listener) { + final Settings normalizedSettings = + Settings.builder().put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX).build(); Settings.Builder settingsForClosedIndices = Settings.builder(); Settings.Builder settingsForOpenIndices = Settings.builder(); final Set skippedSettings = new HashSet<>(); @@ -171,11 +173,16 @@ public ClusterState execute(ClusterState currentState) { } ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); - maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_READ_ONLY_BLOCK, IndexMetaData.INDEX_READ_ONLY_SETTING, openSettings); - maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK, IndexMetaData.INDEX_BLOCKS_READ_ONLY_ALLOW_DELETE_SETTING, openSettings); - maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_METADATA_BLOCK, IndexMetaData.INDEX_BLOCKS_METADATA_SETTING, openSettings); - maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_WRITE_BLOCK, IndexMetaData.INDEX_BLOCKS_WRITE_SETTING, openSettings); - maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_READ_BLOCK, IndexMetaData.INDEX_BLOCKS_READ_SETTING, openSettings); + maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_READ_ONLY_BLOCK, + IndexMetaData.INDEX_READ_ONLY_SETTING, openSettings); + maybeUpdateClusterBlock(actualIndices, 
blocks, IndexMetaData.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK, + IndexMetaData.INDEX_BLOCKS_READ_ONLY_ALLOW_DELETE_SETTING, openSettings); + maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_METADATA_BLOCK, + IndexMetaData.INDEX_BLOCKS_METADATA_SETTING, openSettings); + maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_WRITE_BLOCK, + IndexMetaData.INDEX_BLOCKS_WRITE_SETTING, openSettings); + maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_READ_BLOCK, + IndexMetaData.INDEX_BLOCKS_READ_SETTING, openSettings); if (!openIndices.isEmpty()) { for (Index index : openIndices) { @@ -187,7 +194,8 @@ public ClusterState execute(ClusterState currentState) { indexSettings.put(indexMetaData.getSettings()); } Settings finalSettings = indexSettings.build(); - indexScopedSettings.validate(finalSettings.filter(k -> indexScopedSettings.isPrivateSetting(k) == false), true); + indexScopedSettings.validate( + finalSettings.filter(k -> indexScopedSettings.isPrivateSetting(k) == false), true); metaDataBuilder.put(IndexMetaData.builder(indexMetaData).settings(finalSettings)); } } @@ -203,7 +211,8 @@ public ClusterState execute(ClusterState currentState) { indexSettings.put(indexMetaData.getSettings()); } Settings finalSettings = indexSettings.build(); - indexScopedSettings.validate(finalSettings.filter(k -> indexScopedSettings.isPrivateSetting(k) == false), true); + indexScopedSettings.validate( + finalSettings.filter(k -> indexScopedSettings.isPrivateSetting(k) == false), true); metaDataBuilder.put(IndexMetaData.builder(indexMetaData).settings(finalSettings)); } } @@ -218,7 +227,8 @@ public ClusterState execute(ClusterState currentState) { } } - ClusterState updatedState = ClusterState.builder(currentState).metaData(metaDataBuilder).routingTable(routingTableBuilder.build()).blocks(blocks).build(); + ClusterState updatedState = ClusterState.builder(currentState).metaData(metaDataBuilder) + .routingTable(routingTableBuilder.build()).blocks(blocks).build(); // now, reroute in case things change that require it (like number of replicas) updatedState = allocationService.reroute(updatedState, "settings update"); @@ -256,7 +266,8 @@ private int getTotalNewShards(Index index, ClusterState currentState, int update /** * Updates the cluster block only iff the setting exists in the given settings */ - private static void maybeUpdateClusterBlock(String[] actualIndices, ClusterBlocks.Builder blocks, ClusterBlock block, Setting setting, Settings openSettings) { + private static void maybeUpdateClusterBlock(String[] actualIndices, ClusterBlocks.Builder blocks, ClusterBlock block, + Setting setting, Settings openSettings) { if (setting.exists(openSettings)) { final boolean updateBlock = setting.get(openSettings); for (String index : actualIndices) { @@ -270,7 +281,8 @@ private static void maybeUpdateClusterBlock(String[] actualIndices, ClusterBlock } - public void upgradeIndexSettings(final UpgradeSettingsClusterStateUpdateRequest request, final ActionListener listener) { + public void upgradeIndexSettings(final UpgradeSettingsClusterStateUpdateRequest request, + final ActionListener listener) { clusterService.submitStateUpdateTask("update-index-compatibility-versions", new AckedClusterStateUpdateTask(Priority.URGENT, request, wrapPreservingContext(listener, threadPool.getThreadContext())) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java b/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java index 
7bb72be0e1e18..1c618c1ef88fd 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java @@ -157,7 +157,8 @@ public static RepositoriesMetaData fromXContent(XContentParser parser) throws IO } settings = Settings.fromXContent(parser); } else { - throw new ElasticsearchParseException("failed to parse repository [{}], unknown field [{}]", name, currentFieldName); + throw new ElasticsearchParseException("failed to parse repository [{}], unknown field [{}]", + name, currentFieldName); } } else { throw new ElasticsearchParseException("failed to parse repository [{}]", name); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java index e9d805d34c8a1..526dde505efde 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java @@ -356,8 +356,8 @@ public Builder initializeAsFromCloseToOpen(IndexMetaData indexMetaData) { */ public Builder initializeAsNewRestore(IndexMetaData indexMetaData, SnapshotRecoverySource recoverySource, IntSet ignoreShards) { final UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NEW_INDEX_RESTORED, - "restore_source[" + recoverySource.snapshot().getRepository() + "/" + - recoverySource.snapshot().getSnapshotId().getName() + "]"); + "restore_source[" + recoverySource.snapshot().getRepository() + "/" + + recoverySource.snapshot().getSnapshotId().getName() + "]"); return initializeAsRestore(indexMetaData, recoverySource, ignoreShards, true, unassignedInfo); } @@ -366,15 +366,16 @@ public Builder initializeAsNewRestore(IndexMetaData indexMetaData, SnapshotRecov */ public Builder initializeAsRestore(IndexMetaData indexMetaData, SnapshotRecoverySource recoverySource) { final UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED, - "restore_source[" + recoverySource.snapshot().getRepository() + "/" + - recoverySource.snapshot().getSnapshotId().getName() + "]"); + "restore_source[" + recoverySource.snapshot().getRepository() + "/" + + recoverySource.snapshot().getSnapshotId().getName() + "]"); return initializeAsRestore(indexMetaData, recoverySource, null, false, unassignedInfo); } /** * Initializes an index, to be restored from snapshot */ - private Builder initializeAsRestore(IndexMetaData indexMetaData, SnapshotRecoverySource recoverySource, IntSet ignoreShards, boolean asNew, UnassignedInfo unassignedInfo) { + private Builder initializeAsRestore(IndexMetaData indexMetaData, SnapshotRecoverySource recoverySource, IntSet ignoreShards, + boolean asNew, UnassignedInfo unassignedInfo) { assert indexMetaData.getIndex().equals(index); if (!shards.isEmpty()) { throw new IllegalStateException("trying to initialize an index with fresh shards, but already has shards created"); @@ -435,7 +436,8 @@ public Builder addReplica() { int shardNumber = cursor.value; ShardId shardId = new ShardId(index, shardNumber); // version 0, will get updated when reroute will happen - ShardRouting shard = ShardRouting.newUnassigned(shardId, false, PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.REPLICA_ADDED, null)); + ShardRouting shard = ShardRouting.newUnassigned(shardId, false, PeerRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.REPLICA_ADDED, null)); 
shards.put(shardNumber, new IndexShardRoutingTable.Builder(shards.get(shard.id())).addShard(shard).build() ); @@ -522,7 +524,8 @@ public String prettyPrint() { }); for (IndexShardRoutingTable indexShard : ordered) { - sb.append("----shard_id [").append(indexShard.shardId().getIndex().getName()).append("][").append(indexShard.shardId().id()).append("]\n"); + sb.append("----shard_id [").append(indexShard.shardId().getIndex().getName()) + .append("][").append(indexShard.shardId().id()).append("]\n"); for (ShardRouting shard : indexShard) { sb.append("--------").append(shard.shortSummary()).append("\n"); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java index e3e4da481ceed..7c60d2126555d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java @@ -594,7 +594,8 @@ private AttributesRoutings getInitializingAttribute(AttributesKey key, Discovery ArrayList from = new ArrayList<>(allInitializingShards); List to = collectAttributeShards(key, nodes, from); shardRoutings = new AttributesRoutings(to, Collections.unmodifiableList(from)); - initializingShardsByAttributes = MapBuilder.newMapBuilder(initializingShardsByAttributes).put(key, shardRoutings).immutableMap(); + initializingShardsByAttributes = + MapBuilder.newMapBuilder(initializingShardsByAttributes).put(key, shardRoutings).immutableMap(); } } return shardRoutings; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java index 87655c0641388..c4034dcad8c97 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java @@ -75,12 +75,14 @@ public ShardIterator indexShards(ClusterState clusterState, String index, String } public ShardIterator getShards(ClusterState clusterState, String index, String id, @Nullable String routing, @Nullable String preference) { - return preferenceActiveShardIterator(shards(clusterState, index, id, routing), clusterState.nodes().getLocalNodeId(), clusterState.nodes(), preference, null, null); + return preferenceActiveShardIterator(shards(clusterState, index, id, routing), clusterState.nodes().getLocalNodeId(), + clusterState.nodes(), preference, null, null); } public ShardIterator getShards(ClusterState clusterState, String index, int shardId, @Nullable String preference) { final IndexShardRoutingTable indexShard = clusterState.getRoutingTable().shardRoutingTable(index, shardId); - return preferenceActiveShardIterator(indexShard, clusterState.nodes().getLocalNodeId(), clusterState.nodes(), preference, null, null); + return preferenceActiveShardIterator(indexShard, clusterState.nodes().getLocalNodeId(), clusterState.nodes(), + preference, null, null); } public GroupShardsIterator searchShards(ClusterState clusterState, @@ -111,7 +113,8 @@ public GroupShardsIterator searchShards(ClusterState clusterState private static final Map> EMPTY_ROUTING = Collections.emptyMap(); - private Set computeTargetedShards(ClusterState clusterState, String[] concreteIndices, @Nullable Map> routing) { + private Set computeTargetedShards(ClusterState clusterState, String[] concreteIndices, + @Nullable Map> routing) { routing = routing == null ? 
EMPTY_ROUTING : routing; // just use an empty map final Set set = new HashSet<>(); // we use set here and not list since we might get duplicates diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index aaf34d1e1806d..c827aa8f69aa0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -957,12 +957,14 @@ public ShardRouting updateUnassigned(UnassignedInfo unassignedInfo, RecoverySour } /** - * Unsupported operation, just there for the interface. Use {@link #removeAndIgnore(AllocationStatus, RoutingChangesObserver)} or + * Unsupported operation, just there for the interface. Use + * {@link #removeAndIgnore(AllocationStatus, RoutingChangesObserver)} or * {@link #initialize(String, String, long, RoutingChangesObserver)}. */ @Override public void remove() { - throw new UnsupportedOperationException("remove is not supported in unassigned iterator, use removeAndIgnore or initialize"); + throw new UnsupportedOperationException("remove is not supported in unassigned iterator," + + " use removeAndIgnore or initialize"); } private void innerRemove() { @@ -1106,14 +1108,19 @@ public static boolean assertShardStats(RoutingNodes routingNodes) { assert unassignedPrimaryCount == routingNodes.unassignedShards.getNumPrimaries() : - "Unassigned primaries is [" + unassignedPrimaryCount + "] but RoutingNodes returned unassigned primaries [" + routingNodes.unassigned().getNumPrimaries() + "]"; + "Unassigned primaries is [" + unassignedPrimaryCount + "] but RoutingNodes returned unassigned primaries [" + + routingNodes.unassigned().getNumPrimaries() + "]"; assert unassignedIgnoredPrimaryCount == routingNodes.unassignedShards.getNumIgnoredPrimaries() : - "Unassigned ignored primaries is [" + unassignedIgnoredPrimaryCount + "] but RoutingNodes returned unassigned ignored primaries [" + routingNodes.unassigned().getNumIgnoredPrimaries() + "]"; + "Unassigned ignored primaries is [" + unassignedIgnoredPrimaryCount + + "] but RoutingNodes returned unassigned ignored primaries [" + routingNodes.unassigned().getNumIgnoredPrimaries() + "]"; assert inactivePrimaryCount == routingNodes.inactivePrimaryCount : - "Inactive Primary count [" + inactivePrimaryCount + "] but RoutingNodes returned inactive primaries [" + routingNodes.inactivePrimaryCount + "]"; + "Inactive Primary count [" + inactivePrimaryCount + "] but RoutingNodes returned inactive primaries [" + + routingNodes.inactivePrimaryCount + "]"; assert inactiveShardCount == routingNodes.inactiveShardCount : - "Inactive Shard count [" + inactiveShardCount + "] but RoutingNodes returned inactive shards [" + routingNodes.inactiveShardCount + "]"; - assert routingNodes.getRelocatingShardCount() == relocating : "Relocating shards mismatch [" + routingNodes.getRelocatingShardCount() + "] but expected [" + relocating + "]"; + "Inactive Shard count [" + inactiveShardCount + "] but RoutingNodes returned inactive shards [" + + routingNodes.inactiveShardCount + "]"; + assert routingNodes.getRelocatingShardCount() == relocating : "Relocating shards mismatch [" + + routingNodes.getRelocatingShardCount() + "] but expected [" + relocating + "]"; return true; } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java index 0bcefa9fc7248..770e5b2717023 100644 
--- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java @@ -90,30 +90,33 @@ protected void performReroute(String reason) { return; } logger.trace("rerouting {}", reason); - clusterService.submitStateUpdateTask(CLUSTER_UPDATE_TASK_SOURCE + "(" + reason + ")", new ClusterStateUpdateTask(Priority.HIGH) { - @Override - public ClusterState execute(ClusterState currentState) { - rerouting.set(false); - return allocationService.reroute(currentState, reason); - } + clusterService.submitStateUpdateTask(CLUSTER_UPDATE_TASK_SOURCE + "(" + reason + ")", + new ClusterStateUpdateTask(Priority.HIGH) { + @Override + public ClusterState execute(ClusterState currentState) { + rerouting.set(false); + return allocationService.reroute(currentState, reason); + } - @Override - public void onNoLongerMaster(String source) { - rerouting.set(false); - // no biggie - } + @Override + public void onNoLongerMaster(String source) { + rerouting.set(false); + // no biggie + } - @Override - public void onFailure(String source, Exception e) { - rerouting.set(false); - ClusterState state = clusterService.state(); - if (logger.isTraceEnabled()) { - logger.error(() -> new ParameterizedMessage("unexpected failure during [{}], current state:\n{}", source, state), e); - } else { - logger.error(() -> new ParameterizedMessage("unexpected failure during [{}], current state version [{}]", source, state.version()), e); + @Override + public void onFailure(String source, Exception e) { + rerouting.set(false); + ClusterState state = clusterService.state(); + if (logger.isTraceEnabled()) { + logger.error(() -> new ParameterizedMessage("unexpected failure during [{}], current state:\n{}", + source, state), e); + } else { + logger.error(() -> new ParameterizedMessage("unexpected failure during [{}], current state version [{}]", + source, state.version()), e); + } } - } - }); + }); } catch (Exception e) { rerouting.set(false); ClusterState state = clusterService.state(); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java index bab150fff12bd..ab8d091ac13a7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java @@ -239,14 +239,16 @@ public GroupShardsIterator allAssignedShardsGrouped(String[] indi * @param includeRelocationTargets if true, an extra shard iterator will be added for relocating shards. 
The extra * iterator contains a single ShardRouting pointing at the relocating target */ - public GroupShardsIterator allAssignedShardsGrouped(String[] indices, boolean includeEmpty, boolean includeRelocationTargets) { + public GroupShardsIterator allAssignedShardsGrouped(String[] indices, boolean includeEmpty, + boolean includeRelocationTargets) { return allSatisfyingPredicateShardsGrouped(indices, includeEmpty, includeRelocationTargets, ASSIGNED_PREDICATE); } private static Predicate ACTIVE_PREDICATE = ShardRouting::active; private static Predicate ASSIGNED_PREDICATE = ShardRouting::assignedToNode; - private GroupShardsIterator allSatisfyingPredicateShardsGrouped(String[] indices, boolean includeEmpty, boolean includeRelocationTargets, Predicate predicate) { + private GroupShardsIterator allSatisfyingPredicateShardsGrouped(String[] indices, boolean includeEmpty, + boolean includeRelocationTargets, Predicate predicate) { // use list here since we need to maintain identity across shards ArrayList set = new ArrayList<>(); for (String index : indices) { @@ -260,7 +262,8 @@ private GroupShardsIterator allSatisfyingPredicateShardsGrouped(S if (predicate.test(shardRouting)) { set.add(shardRouting.shardsIt()); if (includeRelocationTargets && shardRouting.relocating()) { - set.add(new PlainShardIterator(shardRouting.shardId(), Collections.singletonList(shardRouting.getTargetRelocatingShard()))); + set.add(new PlainShardIterator(shardRouting.shardId(), + Collections.singletonList(shardRouting.getTargetRelocatingShard()))); } } else if (includeEmpty) { // we need this for counting properly, just make it an empty one set.add(new PlainShardIterator(shardRouting.shardId(), Collections.emptyList())); @@ -279,7 +282,8 @@ public ShardsIterator allShardsIncludingRelocationTargets(String[] indices) { return allShardsSatisfyingPredicate(indices, shardRouting -> true, true); } - private ShardsIterator allShardsSatisfyingPredicate(String[] indices, Predicate predicate, boolean includeRelocationTargets) { + private ShardsIterator allShardsSatisfyingPredicate(String[] indices, Predicate predicate, + boolean includeRelocationTargets) { // use list here since we need to maintain identity across shards List shards = new ArrayList<>(); for (String index : indices) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java index 74341ca271a9c..bfc4ce0618833 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java @@ -77,12 +77,17 @@ public final class ShardRouting implements Writeable, ToXContentObject { this.expectedShardSize = expectedShardSize; this.targetRelocatingShard = initializeTargetRelocatingShard(); this.asList = Collections.singletonList(this); - assert expectedShardSize == UNAVAILABLE_EXPECTED_SHARD_SIZE || state == ShardRoutingState.INITIALIZING || state == ShardRoutingState.RELOCATING : expectedShardSize + " state: " + state; - assert expectedShardSize >= 0 || state != ShardRoutingState.INITIALIZING || state != ShardRoutingState.RELOCATING : expectedShardSize + " state: " + state; + assert expectedShardSize == UNAVAILABLE_EXPECTED_SHARD_SIZE || state == ShardRoutingState.INITIALIZING || + state == ShardRoutingState.RELOCATING : expectedShardSize + " state: " + state; + assert expectedShardSize >= 0 || state != ShardRoutingState.INITIALIZING || state != ShardRoutingState.RELOCATING : + 
expectedShardSize + " state: " + state; assert !(state == ShardRoutingState.UNASSIGNED && unassignedInfo == null) : "unassigned shard must be created with meta"; - assert (state == ShardRoutingState.UNASSIGNED || state == ShardRoutingState.INITIALIZING) == (recoverySource != null) : "recovery source only available on unassigned or initializing shard but was " + state; - assert recoverySource == null || recoverySource == PeerRecoverySource.INSTANCE || primary : "replica shards always recover from primary"; - assert (currentNodeId == null) == (state == ShardRoutingState.UNASSIGNED) : "unassigned shard must not be assigned to a node " + this; + assert (state == ShardRoutingState.UNASSIGNED || state == ShardRoutingState.INITIALIZING) == (recoverySource != null) : + "recovery source only available on unassigned or initializing shard but was " + state; + assert recoverySource == null || recoverySource == PeerRecoverySource.INSTANCE || primary : + "replica shards always recover from primary"; + assert (currentNodeId == null) == (state == ShardRoutingState.UNASSIGNED) : + "unassigned shard must not be assigned to a node " + this; } @Nullable @@ -98,8 +103,10 @@ private ShardRouting initializeTargetRelocatingShard() { /** * Creates a new unassigned shard. */ - public static ShardRouting newUnassigned(ShardId shardId, boolean primary, RecoverySource recoverySource, UnassignedInfo unassignedInfo) { - return new ShardRouting(shardId, null, null, primary, ShardRoutingState.UNASSIGNED, recoverySource, unassignedInfo, null, UNAVAILABLE_EXPECTED_SHARD_SIZE); + public static ShardRouting newUnassigned(ShardId shardId, boolean primary, RecoverySource recoverySource, + UnassignedInfo unassignedInfo) { + return new ShardRouting(shardId, null, null, primary, ShardRoutingState.UNASSIGNED, + recoverySource, unassignedInfo, null, UNAVAILABLE_EXPECTED_SHARD_SIZE); } public Index index() { @@ -446,12 +453,14 @@ public ShardRouting moveUnassignedFromPrimary() { **/ public boolean isSameAllocation(ShardRouting other) { boolean b = this.allocationId != null && other.allocationId != null && this.allocationId.getId().equals(other.allocationId.getId()); - assert b == false || this.currentNodeId.equals(other.currentNodeId) : "ShardRoutings have the same allocation id but not the same node. This [" + this + "], other [" + other + "]"; + assert b == false || this.currentNodeId.equals(other.currentNodeId) : + "ShardRoutings have the same allocation id but not the same node. This [" + this + "], other [" + other + "]"; return b; } /** - * Returns true if this shard is a relocation target for another shard (i.e., was created with {@link #initializeTargetRelocatingShard()} + * Returns true if this shard is a relocation target for another shard + * (i.e., was created with {@link #initializeTargetRelocatingShard()} */ public boolean isRelocationTarget() { return state == ShardRoutingState.INITIALIZING && relocatingNodeId != null; @@ -465,21 +474,25 @@ public boolean isRelocationTargetOf(ShardRouting other) { assert b == false || other.state == ShardRoutingState.RELOCATING : "ShardRouting is a relocation target but the source shard state isn't relocating. This [" + this + "], other [" + other + "]"; - assert b == false || other.allocationId.getId().equals(this.allocationId.getRelocationId()) : - "ShardRouting is a relocation target but the source id isn't equal to source's allocationId.getRelocationId. 
This [" + this + "], other [" + other + "]"; + "ShardRouting is a relocation target but the source id isn't equal to source's allocationId.getRelocationId." + + " This [" + this + "], other [" + other + "]"; assert b == false || other.currentNodeId().equals(this.relocatingNodeId) : - "ShardRouting is a relocation target but source current node id isn't equal to target relocating node. This [" + this + "], other [" + other + "]"; + "ShardRouting is a relocation target but source current node id isn't equal to target relocating node." + + " This [" + this + "], other [" + other + "]"; assert b == false || this.currentNodeId().equals(other.relocatingNodeId) : - "ShardRouting is a relocation target but current node id isn't equal to source relocating node. This [" + this + "], other [" + other + "]"; + "ShardRouting is a relocation target but current node id isn't equal to source relocating node." + + " This [" + this + "], other [" + other + "]"; assert b == false || this.shardId.equals(other.shardId) : - "ShardRouting is a relocation target but both indexRoutings are not of the same shard id. This [" + this + "], other [" + other + "]"; + "ShardRouting is a relocation target but both indexRoutings are not of the same shard id." + + " This [" + this + "], other [" + other + "]"; assert b == false || this.primary == other.primary : - "ShardRouting is a relocation target but primary flag is different. This [" + this + "], target [" + other + "]"; + "ShardRouting is a relocation target but primary flag is different." + + " This [" + this + "], target [" + other + "]"; return b; } @@ -494,16 +507,20 @@ public boolean isRelocationSourceOf(ShardRouting other) { assert b == false || this.allocationId.getId().equals(other.allocationId.getRelocationId()) : - "ShardRouting is a relocation source but the allocation id isn't equal to other.allocationId.getRelocationId. This [" + this + "], other [" + other + "]"; + "ShardRouting is a relocation source but the allocation id isn't equal to other.allocationId.getRelocationId." + + " This [" + this + "], other [" + other + "]"; assert b == false || this.currentNodeId().equals(other.relocatingNodeId) : - "ShardRouting is a relocation source but current node isn't equal to other's relocating node. This [" + this + "], other [" + other + "]"; + "ShardRouting is a relocation source but current node isn't equal to other's relocating node." + + " This [" + this + "], other [" + other + "]"; assert b == false || other.currentNodeId().equals(this.relocatingNodeId) : - "ShardRouting is a relocation source but relocating node isn't equal to other's current node. This [" + this + "], other [" + other + "]"; + "ShardRouting is a relocation source but relocating node isn't equal to other's current node." + + " This [" + this + "], other [" + other + "]"; assert b == false || this.shardId.equals(other.shardId) : - "ShardRouting is a relocation source but both indexRoutings are not of the same shard. This [" + this + "], target [" + other + "]"; + "ShardRouting is a relocation source but both indexRoutings are not of the same shard." + + " This [" + this + "], target [" + other + "]"; assert b == false || this.primary == other.primary : "ShardRouting is a relocation source but primary flag is different. 
This [" + this + "], target [" + other + "]"; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index ec0af211ecca5..4ddeb8de0737c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -171,9 +171,10 @@ public float getShardBalance() { *
shard balance - balance property over shards per cluster * *
    - * Each of these properties are expressed as factor such that the properties factor defines the relative importance of the property for the - * weight function. For example if the weight function should calculate the weights only based on a global (shard) balance the index balance - * can be set to {@code 0.0} and will in turn have no effect on the distribution. + * Each of these properties are expressed as factor such that the properties factor defines the relative + * importance of the property for the weight function. For example if the weight function should calculate + * the weights only based on a global (shard) balance the index balance can be set to {@code 0.0} and will + * in turn have no effect on the distribution. *

* The weight per index is calculated based on the following formula:
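The javadoc in the hunk above refers to a per-index weight formula, but the formula itself falls outside the context lines shown here. The following is a hedged, standalone sketch only, not the actual WeightFunction in BalancedShardsAllocator: it illustrates how a two-factor weight could normalize the index and shard balance settings and apply them to a node's deviation from the cluster averages. The class name WeightSketch, the method weight, and its parameters are all hypothetical and do not appear in this diff.

public final class WeightSketch {

    private final float indexBalance;
    private final float shardBalance;

    public WeightSketch(float indexBalance, float shardBalance) {
        this.indexBalance = indexBalance;
        this.shardBalance = shardBalance;
    }

    // theta0/theta1 normalize the shard/index factors so they sum to 1; setting
    // indexBalance to 0.0f removes the per-index term entirely, matching the
    // behaviour described in the javadoc above.
    public float weight(int shardsOnNode, float avgShardsPerNode,
                        int indexShardsOnNode, float avgIndexShardsPerNode) {
        float sum = indexBalance + shardBalance;
        float theta0 = shardBalance / sum;   // global (shard) balance factor
        float theta1 = indexBalance / sum;   // per-index balance factor
        return theta0 * (shardsOnNode - avgShardsPerNode)
             + theta1 * (indexShardsOnNode - avgIndexShardsPerNode);
    }

    public static void main(String[] args) {
        WeightSketch onlyShardBalance = new WeightSketch(0.0f, 1.0f);
        // 12 shards on the node vs. a cluster average of 10 -> weight 2.0
        System.out.println(onlyShardBalance.weight(12, 10.0f, 3, 2.0f));
    }
}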
      @@ -531,7 +532,8 @@ private void balanceByWeights() { break advance_range; } if (logger.isTraceEnabled()) { - logger.trace("Stop balancing index [{}] min_node [{}] weight: [{}] max_node [{}] weight: [{}] delta: [{}]", + logger.trace("Stop balancing index [{}] min_node [{}] weight: [{}]" + + " max_node [{}] weight: [{}] delta: [{}]", index, maxNode.getNodeId(), weights[highIdx], minNode.getNodeId(), weights[lowIdx], delta); } break; @@ -651,7 +653,8 @@ public void moveShards() { final ModelNode targetNode = nodes.get(moveDecision.getTargetNode().getId()); sourceNode.removeShard(shardRouting); Tuple relocatingShards = routingNodes.relocateShard(shardRouting, targetNode.getNodeId(), - allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), allocation.changes()); + allocation.clusterInfo().getShardSize(shardRouting, + ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), allocation.changes()); targetNode.addShard(relocatingShards.v2()); if (logger.isTraceEnabled()) { logger.trace("Moved shard [{}] to node [{}]", shardRouting, targetNode.getRoutingNode()); @@ -794,7 +797,8 @@ private void allocateUnassigned() { /* * we use 2 arrays and move replicas to the second array once we allocated an identical * replica in the current iteration to make sure all indices get allocated in the same manner. - * The arrays are sorted by primaries first and then by index and shard ID so a 2 indices with 2 replica and 1 shard would look like: + * The arrays are sorted by primaries first and then by index and shard ID so a 2 indices with + * 2 replica and 1 shard would look like: * [(0,P,IDX1), (0,P,IDX2), (0,R,IDX1), (0,R,IDX1), (0,R,IDX2), (0,R,IDX2)] * if we allocate for instance (0, R, IDX1) we move the second replica to the secondary array and proceed with * the next replica. If we could not find a node to allocate (0,R,IDX1) we move all it's replicas to ignoreUnassigned. @@ -1007,7 +1011,8 @@ private boolean tryRelocateShard(ModelNode minNode, ModelNode maxNode, String id && ((rebalanceDecision.type() == Type.YES) || (rebalanceDecision.type() == Type.THROTTLE))) { if (maxNode.containsShard(shard)) { // simulate moving shard from maxNode to minNode - final float delta = weight.weightShardAdded(this, minNode, idx) - weight.weightShardRemoved(this, maxNode, idx); + final float delta = weight.weightShardAdded( + this, minNode, idx) - weight.weightShardRemoved(this, maxNode, idx); if (delta < minCost || (candidate != null && Float.compare(delta, minCost) == 0 && candidate.id() > shard.id())) { /* this last line is a tie-breaker to make the shard allocation alg deterministic @@ -1039,8 +1044,8 @@ private boolean tryRelocateShard(ModelNode minNode, ModelNode maxNode, String id } } if (logger.isTraceEnabled()) { - logger.trace("Couldn't find shard to relocate from node [{}] to node [{}] allocation decision [{}]", maxNode.getNodeId(), - minNode.getNodeId(), decision == null ? "NO" : decision.type().name()); + logger.trace("Couldn't find shard to relocate from node [{}] to node [{}] allocation decision [{}]", + maxNode.getNodeId(), minNode.getNodeId(), decision == null ? 
"NO" : decision.type().name()); } return false; } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AbstractAllocateAllocationCommand.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AbstractAllocateAllocationCommand.java index 4ffd70aee1cd8..0e6ba4f051dd3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AbstractAllocateAllocationCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AbstractAllocateAllocationCommand.java @@ -186,7 +186,8 @@ protected RerouteExplanation explainOrThrowRejectedCommand(boolean explain, Rout * @param routingNode the node to initialize it to * @param shardRouting the shard routing that is to be matched in unassigned shards */ - protected void initializeUnassignedShard(RoutingAllocation allocation, RoutingNodes routingNodes, RoutingNode routingNode, ShardRouting shardRouting) { + protected void initializeUnassignedShard(RoutingAllocation allocation, RoutingNodes routingNodes, + RoutingNode routingNode, ShardRouting shardRouting) { initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting, null, null); } @@ -212,7 +213,8 @@ protected void initializeUnassignedShard(RoutingAllocation allocation, RoutingNo unassigned = it.updateUnassigned(unassignedInfo != null ? unassignedInfo : unassigned.unassignedInfo(), recoverySource != null ? recoverySource : unassigned.recoverySource(), allocation.changes()); } - it.initialize(routingNode.nodeId(), null, allocation.clusterInfo().getShardSize(unassigned, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), allocation.changes()); + it.initialize(routingNode.nodeId(), null, + allocation.clusterInfo().getShardSize(unassigned, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), allocation.changes()); return; } assert false : "shard to initialize not found in list of unassigned shards"; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java index a42fd2765b598..4d037570dd266 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java @@ -121,8 +121,8 @@ public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) } if (shardRouting.recoverySource().getType() != RecoverySource.Type.EMPTY_STORE && acceptDataLoss == false) { - String dataLossWarning = "allocating an empty primary for [" + index + "][" + shardId + "] can result in data loss. Please confirm " + - "by setting the accept_data_loss parameter to true"; + String dataLossWarning = "allocating an empty primary for [" + index + "][" + shardId + + "] can result in data loss. 
Please confirm by setting the accept_data_loss parameter to true"; return explainOrThrowRejectedCommand(explain, allocation, dataLossWarning); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateReplicaAllocationCommand.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateReplicaAllocationCommand.java index 6ec09a9bbbbe9..709681f2b2008 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateReplicaAllocationCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateReplicaAllocationCommand.java @@ -109,10 +109,12 @@ public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) } if (primaryShardRouting.unassigned()) { return explainOrThrowRejectedCommand(explain, allocation, - "trying to allocate a replica shard [" + index + "][" + shardId + "], while corresponding primary shard is still unassigned"); + "trying to allocate a replica shard [" + index + "][" + shardId + + "], while corresponding primary shard is still unassigned"); } - List replicaShardRoutings = allocation.routingTable().shardRoutingTable(index, shardId).replicaShardsWithState(ShardRoutingState.UNASSIGNED); + List replicaShardRoutings = + allocation.routingTable().shardRoutingTable(index, shardId).replicaShardsWithState(ShardRoutingState.UNASSIGNED); ShardRouting shardRouting; if (replicaShardRoutings.isEmpty()) { return explainOrThrowRejectedCommand(explain, allocation, @@ -127,7 +129,8 @@ public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) if (explain) { return new RerouteExplanation(this, decision); } - throw new IllegalArgumentException("[" + name() + "] allocation of [" + index + "][" + shardId + "] on node " + discoNode + " is not allowed, reason: " + decision); + throw new IllegalArgumentException("[" + name() + "] allocation of [" + index + "][" + shardId + "] on node " + discoNode + + " is not allowed, reason: " + decision); } initializeUnassignedShard(allocation, routingNodes, routingNode, shardRouting); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java index 72290eb9ccf1a..67122cb3ff1b1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java @@ -161,7 +161,8 @@ public static AllocationCommands fromXContent(XContentParser parser) throws IOEx commands.add(parser.namedObject(AllocationCommand.class, commandName, null)); // move to the end object one if (parser.nextToken() != XContentParser.Token.END_OBJECT) { - throw new ElasticsearchParseException("allocation command is malformed, done parsing a command, but didn't get END_OBJECT, got [{}] instead", token); + throw new ElasticsearchParseException("allocation command is malformed, done parsing a command," + + " but didn't get END_OBJECT, got [{}] instead", token); } } else { throw new ElasticsearchParseException("allocation command is malformed, got [{}] instead", token); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java index 6b4af8c605aae..9358935542029 100644 --- 
a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java @@ -139,12 +139,14 @@ public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) if (explain) { return new RerouteExplanation(this, decision); } - throw new IllegalArgumentException("[move_allocation] can't move " + shardId + ", from " + fromDiscoNode + ", to " + toDiscoNode + ", since its not allowed, reason: " + decision); + throw new IllegalArgumentException("[move_allocation] can't move " + shardId + ", from " + fromDiscoNode + ", to " + + toDiscoNode + ", since its not allowed, reason: " + decision); } if (decision.type() == Decision.Type.THROTTLE) { // its being throttled, maybe have a flag to take it into account and fail? for now, just do it since the "user" wants it... } - allocation.routingNodes().relocateShard(shardRouting, toRoutingNode.nodeId(), allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), allocation.changes()); + allocation.routingNodes().relocateShard(shardRouting, toRoutingNode.nodeId(), + allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), allocation.changes()); } if (!found) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java index 53e67ba25a429..ee7f761d65d97 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java @@ -74,7 +74,8 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing // short track if a NO is returned. if (decision == Decision.NO) { if (logger.isTraceEnabled()) { - logger.trace("Can not allocate [{}] on node [{}] due to [{}]", shardRouting, node.node(), allocationDecider.getClass().getSimpleName()); + logger.trace("Can not allocate [{}] on node [{}] due to [{}]", + shardRouting, node.node(), allocationDecider.getClass().getSimpleName()); } // short circuit only if debugging is not enabled if (!allocation.debugDecision()) { @@ -106,7 +107,8 @@ public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAl // short track if a NO is returned. 
if (decision == Decision.NO) { if (logger.isTraceEnabled()) { - logger.trace("Shard [{}] can not remain on node [{}] due to [{}]", shardRouting, node.nodeId(), allocationDecider.getClass().getSimpleName()); + logger.trace("Shard [{}] can not remain on node [{}] due to [{}]", + shardRouting, node.nodeId(), allocationDecider.getClass().getSimpleName()); } if (!allocation.debugDecision()) { return decision; diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterHealthIT.java b/server/src/test/java/org/elasticsearch/cluster/ClusterHealthIT.java index d6cf029c00d82..8693308e8650b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterHealthIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterHealthIT.java @@ -36,7 +36,8 @@ public void testSimpleLocalHealth() { for (String node : internalCluster().getNodeNames()) { // a very high time out, which should never fire due to the local flag - ClusterHealthResponse health = client(node).admin().cluster().prepareHealth().setLocal(true).setWaitForEvents(Priority.LANGUID).setTimeout("30s").get("10s"); + ClusterHealthResponse health = client(node).admin().cluster().prepareHealth().setLocal(true) + .setWaitForEvents(Priority.LANGUID).setTimeout("30s").get("10s"); assertThat(health.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(health.isTimedOut(), equalTo(false)); } @@ -44,7 +45,8 @@ public void testSimpleLocalHealth() { public void testHealth() { logger.info("--> running cluster health on an index that does not exists"); - ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth("test1").setWaitForYellowStatus().setTimeout("1s").execute().actionGet(); + ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth("test1") + .setWaitForYellowStatus().setTimeout("1s").execute().actionGet(); assertThat(healthResponse.isTimedOut(), equalTo(true)); assertThat(healthResponse.getStatus(), equalTo(ClusterHealthStatus.RED)); assertThat(healthResponse.getIndices().isEmpty(), equalTo(true)); @@ -59,13 +61,15 @@ public void testHealth() { createIndex("test1"); logger.info("--> running cluster health on an index that does exists"); - healthResponse = client().admin().cluster().prepareHealth("test1").setWaitForGreenStatus().setTimeout("10s").execute().actionGet(); + healthResponse = client().admin().cluster().prepareHealth("test1") + .setWaitForGreenStatus().setTimeout("10s").execute().actionGet(); assertThat(healthResponse.isTimedOut(), equalTo(false)); assertThat(healthResponse.getStatus(), equalTo(ClusterHealthStatus.GREEN)); assertThat(healthResponse.getIndices().get("test1").getStatus(), equalTo(ClusterHealthStatus.GREEN)); logger.info("--> running cluster health on an index that does exists and an index that doesn't exists"); - healthResponse = client().admin().cluster().prepareHealth("test1", "test2").setWaitForYellowStatus().setTimeout("1s").execute().actionGet(); + healthResponse = client().admin().cluster().prepareHealth("test1", "test2") + .setWaitForYellowStatus().setTimeout("1s").execute().actionGet(); assertThat(healthResponse.isTimedOut(), equalTo(true)); assertThat(healthResponse.getStatus(), equalTo(ClusterHealthStatus.RED)); assertThat(healthResponse.getIndices().get("test1").getStatus(), equalTo(ClusterHealthStatus.GREEN)); diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java b/server/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java index 1a0e964ef7740..61507a8363d95 100644 --- 
a/server/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterInfoServiceIT.java @@ -132,7 +132,8 @@ public void testClusterInfoServiceCollectsInformation() throws Exception { ensureGreen("test"); InternalTestCluster internalTestCluster = internalCluster(); // Get the cluster info service on the master node - final InternalClusterInfoService infoService = (InternalClusterInfoService) internalTestCluster.getInstance(ClusterInfoService.class, internalTestCluster.getMasterName()); + final InternalClusterInfoService infoService = (InternalClusterInfoService) internalTestCluster + .getInstance(ClusterInfoService.class, internalTestCluster.getMasterName()); infoService.setUpdateFrequency(TimeValue.timeValueMillis(200)); infoService.onMaster(); ClusterInfo info = infoService.refresh(); @@ -178,7 +179,8 @@ public void testClusterInfoServiceInformationClearOnError() { prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)).get(); ensureGreen("test"); InternalTestCluster internalTestCluster = internalCluster(); - InternalClusterInfoService infoService = (InternalClusterInfoService) internalTestCluster.getInstance(ClusterInfoService.class, internalTestCluster.getMasterName()); + InternalClusterInfoService infoService = (InternalClusterInfoService) internalTestCluster + .getInstance(ClusterInfoService.class, internalTestCluster.getMasterName()); // get one healthy sample ClusterInfo info = infoService.refresh(); assertNotNull("failed to collect info", info); @@ -186,10 +188,12 @@ public void testClusterInfoServiceInformationClearOnError() { assertThat("some shard sizes are populated", info.shardSizes.size(), greaterThan(0)); - MockTransportService mockTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, internalTestCluster.getMasterName()); + MockTransportService mockTransportService = (MockTransportService) internalCluster() + .getInstance(TransportService.class, internalTestCluster.getMasterName()); final AtomicBoolean timeout = new AtomicBoolean(false); - final Set blockedActions = newHashSet(NodesStatsAction.NAME, NodesStatsAction.NAME + "[n]", IndicesStatsAction.NAME, IndicesStatsAction.NAME + "[n]"); + final Set blockedActions = newHashSet(NodesStatsAction.NAME, NodesStatsAction.NAME + "[n]", + IndicesStatsAction.NAME, IndicesStatsAction.NAME + "[n]"); // drop all outgoing stats requests to force a timeout. 
for (DiscoveryNode node : internalTestCluster.clusterService().state().getNodes()) { mockTransportService.addSendBehavior(internalTestCluster.getInstance(TransportService.class, node.getName()), diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java b/server/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java index a5d865a274140..b5359634fcb36 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java @@ -162,7 +162,8 @@ public void testClusterStateDiffSerialization() throws Exception { // Check cluster blocks assertThat(clusterStateFromDiffs.blocks().global(), equalTo(clusterStateFromDiffs.blocks().global())); assertThat(clusterStateFromDiffs.blocks().indices(), equalTo(clusterStateFromDiffs.blocks().indices())); - assertThat(clusterStateFromDiffs.blocks().disableStatePersistence(), equalTo(clusterStateFromDiffs.blocks().disableStatePersistence())); + assertThat(clusterStateFromDiffs.blocks().disableStatePersistence(), + equalTo(clusterStateFromDiffs.blocks().disableStatePersistence())); // Check metadata assertThat(clusterStateFromDiffs.metaData().version(), equalTo(clusterState.metaData().version())); @@ -179,9 +180,11 @@ public void testClusterStateDiffSerialization() throws Exception { // Smoke test - we cannot compare bytes to bytes because some elements might get serialized in different order // however, serialized size should remain the same - assertThat(ClusterState.Builder.toBytes(clusterStateFromDiffs).length, equalTo(ClusterState.Builder.toBytes(clusterState).length)); + assertThat(ClusterState.Builder.toBytes(clusterStateFromDiffs).length, + equalTo(ClusterState.Builder.toBytes(clusterState).length)); } catch (AssertionError error) { - logger.error("Cluster state:\n{}\nCluster state from diffs:\n{}", clusterState.toString(), clusterStateFromDiffs.toString()); + logger.error("Cluster state:\n{}\nCluster state from diffs:\n{}", + clusterState.toString(), clusterStateFromDiffs.toString()); throw error; } } @@ -195,7 +198,8 @@ public void testClusterStateDiffSerialization() throws Exception { */ private ClusterState.Builder randomNodes(ClusterState clusterState) { DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes()); - List nodeIds = randomSubsetOf(randomInt(clusterState.nodes().getNodes().size() - 1), clusterState.nodes().getNodes().keys().toArray(String.class)); + List nodeIds = randomSubsetOf(randomInt(clusterState.nodes().getNodes().size() - 1), + clusterState.nodes().getNodes().keys().toArray(String.class)); for (String nodeId : nodeIds) { if (nodeId.startsWith("node-")) { nodes.remove(nodeId); @@ -220,18 +224,21 @@ private ClusterState.Builder randomRoutingTable(ClusterState clusterState) { RoutingTable.Builder builder = RoutingTable.builder(clusterState.routingTable()); int numberOfIndices = clusterState.routingTable().indicesRouting().size(); if (numberOfIndices > 0) { - List randomIndices = randomSubsetOf(randomInt(numberOfIndices - 1), clusterState.routingTable().indicesRouting().keys().toArray(String.class)); + List randomIndices = randomSubsetOf(randomInt(numberOfIndices - 1), + clusterState.routingTable().indicesRouting().keys().toArray(String.class)); for (String index : randomIndices) { if (randomBoolean()) { builder.remove(index); } else { - builder.add(randomChangeToIndexRoutingTable(clusterState.routingTable().indicesRouting().get(index), 
clusterState.nodes().getNodes().keys().toArray(String.class))); + builder.add(randomChangeToIndexRoutingTable(clusterState.routingTable().indicesRouting().get(index), + clusterState.nodes().getNodes().keys().toArray(String.class))); } } } int additionalIndexCount = randomIntBetween(1, 20); for (int i = 0; i < additionalIndexCount; i++) { - builder.add(randomIndexRoutingTable("index-" + randomInt(), clusterState.nodes().getNodes().keys().toArray(String.class))); + builder.add(randomIndexRoutingTable("index-" + randomInt(), + clusterState.nodes().getNodes().keys().toArray(String.class))); } return ClusterState.builder(clusterState).routingTable(builder.build()); } @@ -299,7 +306,8 @@ private ClusterState.Builder randomBlocks(ClusterState clusterState) { ClusterBlocks.Builder builder = ClusterBlocks.builder().blocks(clusterState.blocks()); int globalBlocksCount = clusterState.blocks().global().size(); if (globalBlocksCount > 0) { - List blocks = randomSubsetOf(randomInt(globalBlocksCount - 1), clusterState.blocks().global().toArray(new ClusterBlock[globalBlocksCount])); + List blocks = randomSubsetOf(randomInt(globalBlocksCount - 1), + clusterState.blocks().global().toArray(new ClusterBlock[globalBlocksCount])); for (ClusterBlock block : blocks) { builder.removeGlobalBlock(block); } @@ -366,7 +374,8 @@ private ClusterState randomClusterStateParts(ClusterState clusterState, Stri ImmutableOpenMap parts = randomPart.parts(clusterState); int partCount = parts.size(); if (partCount > 0) { - List randomParts = randomSubsetOf(randomInt(partCount - 1), randomPart.parts(clusterState).keys().toArray(String.class)); + List randomParts = randomSubsetOf(randomInt(partCount - 1), + randomPart.parts(clusterState).keys().toArray(String.class)); for (String part : randomParts) { if (randomBoolean()) { randomPart.remove(builder, part); @@ -477,7 +486,8 @@ private MetaData randomParts(MetaData metaData, String prefix, RandomPart ImmutableOpenMap parts = randomPart.parts(metaData); int partCount = parts.size(); if (partCount > 0) { - List randomParts = randomSubsetOf(randomInt(partCount - 1), randomPart.parts(metaData).keys().toArray(String.class)); + List randomParts = randomSubsetOf(randomInt(partCount - 1), + randomPart.parts(metaData).keys().toArray(String.class)); for (String part : randomParts) { if (randomBoolean()) { randomPart.remove(builder, part); @@ -545,7 +555,8 @@ public IndexMetaData randomChange(IndexMetaData part) { } break; case 2: - builder.settings(Settings.builder().put(part.getSettings()).put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())); + builder.settings(Settings.builder().put(part.getSettings()) + .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID())); break; default: throw new IllegalArgumentException("Shouldn't be here"); diff --git a/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java b/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java index 4ef8f7cbdb770..c4fcb9bdb53e2 100644 --- a/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/DiskUsageTests.java @@ -98,13 +98,15 @@ public void testRandomDiskUsage() { public void testFillShardLevelInfo() { final Index index = new Index("test", "0xdeadbeef"); - ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(index, 0), false, PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); + ShardRouting test_0 = ShardRouting.newUnassigned(new ShardId(index, 0), false, 
PeerRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); test_0 = ShardRoutingHelper.initialize(test_0, "node1"); test_0 = ShardRoutingHelper.moveToStarted(test_0); Path test0Path = createTempDir().resolve("indices").resolve(index.getUUID()).resolve("0"); CommonStats commonStats0 = new CommonStats(); commonStats0.store = new StoreStats(100); - ShardRouting test_1 = ShardRouting.newUnassigned(new ShardId(index, 1), false, PeerRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); + ShardRouting test_1 = ShardRouting.newUnassigned(new ShardId(index, 1), false, PeerRecoverySource.INSTANCE, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "foo")); test_1 = ShardRoutingHelper.initialize(test_1, "node2"); test_1 = ShardRoutingHelper.moveToStarted(test_1); Path test1Path = createTempDir().resolve("indices").resolve(index.getUUID()).resolve("1"); diff --git a/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java b/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java index 31ffb026e3a7f..2a606328ce466 100644 --- a/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/MinimumMasterNodesIT.java @@ -96,7 +96,8 @@ public void testSimpleMinimumMasterNodes() throws Exception { logger.info("--> start second node, cluster should be formed"); internalCluster().startNode(settings); - ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").execute().actionGet(); + ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth() + .setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").execute().actionGet(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); @@ -115,13 +116,15 @@ public void testSimpleMinimumMasterNodes() throws Exception { client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field", "value").execute().actionGet(); } // make sure that all shards recovered before trying to flush - assertThat(client().admin().cluster().prepareHealth("test").setWaitForActiveShards(numShards.totalNumShards).execute().actionGet().getActiveShards(), equalTo(numShards.totalNumShards)); + assertThat(client().admin().cluster().prepareHealth("test") + .setWaitForActiveShards(numShards.totalNumShards).execute().actionGet().getActiveShards(), equalTo(numShards.totalNumShards)); // flush for simpler debugging flushAndRefresh(); logger.info("--> verify we the data back"); for (int i = 0; i < 10; i++) { - assertThat(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(100L)); + assertThat(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()) + .execute().actionGet().getHits().getTotalHits(), equalTo(100L)); } internalCluster().stopCurrentMasterNode(); @@ -138,7 +141,8 @@ public void testSimpleMinimumMasterNodes() throws Exception { logger.info("--> starting the previous master node again..."); internalCluster().startNode(settings); - clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().setWaitForNodes("2").execute().actionGet(); + clusterHealthResponse = 
client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID) + .setWaitForYellowStatus().setWaitForNodes("2").execute().actionGet(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); @@ -167,7 +171,8 @@ public void testSimpleMinimumMasterNodes() throws Exception { internalCluster().startNode(settings); ensureGreen(); - clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").setWaitForGreenStatus().execute().actionGet(); + clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID) + .setWaitForNodes("2").setWaitForGreenStatus().execute().actionGet(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); state = client().admin().cluster().prepareState().setLocal(true).execute().actionGet().getState(); @@ -211,7 +216,8 @@ public void testMultipleNodesShutdownNonMasterNodes() throws Exception { internalCluster().startNodes(2, settings); ensureGreen(); - ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("4").execute().actionGet(); + ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth() + .setWaitForEvents(Priority.LANGUID).setWaitForNodes("4").execute().actionGet(); assertThat(clusterHealthResponse.isTimedOut(), equalTo(false)); state = client().admin().cluster().prepareState().execute().actionGet().getState(); @@ -225,7 +231,8 @@ public void testMultipleNodesShutdownNonMasterNodes() throws Exception { } ensureGreen(); // make sure that all shards recovered before trying to flush - assertThat(client().admin().cluster().prepareHealth("test").setWaitForActiveShards(numShards.totalNumShards).execute().actionGet().isTimedOut(), equalTo(false)); + assertThat(client().admin().cluster().prepareHealth("test") + .setWaitForActiveShards(numShards.totalNumShards).execute().actionGet().isTimedOut(), equalTo(false)); // flush for simpler debugging client().admin().indices().prepareFlush().execute().actionGet(); @@ -303,7 +310,8 @@ private void assertNoMasterBlockOnAllNodes() throws InterruptedException { for (Client client : internalCluster().getClients()) { boolean clientHasNoMasterBlock = hasNoMasterBlock.test(client); if (logger.isDebugEnabled()) { - logger.debug("Checking for NO_MASTER_BLOCK on client: {} NO_MASTER_BLOCK: [{}]", client, clientHasNoMasterBlock); + logger.debug("Checking for NO_MASTER_BLOCK on client: {} NO_MASTER_BLOCK: [{}]", + client, clientHasNoMasterBlock); } success &= clientHasNoMasterBlock; } @@ -334,20 +342,25 @@ public void testCanNotBringClusterDown() throws ExecutionException, InterruptedE int updateCount = randomIntBetween(1, nodeCount); - logger.info("--> updating [{}] to [{}]", ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), updateCount); + logger.info("--> updating [{}] to [{}]", + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), updateCount); assertAcked(client().admin().cluster().prepareUpdateSettings() - .setPersistentSettings(Settings.builder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), updateCount))); + .setPersistentSettings(Settings.builder() + .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), updateCount))); logger.info("--> verifying no node left and master is up"); 
assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(nodeCount)).get().isTimedOut()); updateCount = nodeCount + randomIntBetween(1, 2000); - logger.info("--> trying to updating [{}] to [{}]", ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), updateCount); + logger.info("--> trying to updating [{}] to [{}]", + ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), updateCount); try { client().admin().cluster().prepareUpdateSettings() - .setPersistentSettings(Settings.builder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), updateCount)); + .setPersistentSettings(Settings.builder() + .put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), updateCount)); } catch (IllegalArgumentException ex) { - assertEquals(ex.getMessage(), "cannot set discovery.zen.minimum_master_nodes to more than the current master nodes count [" +updateCount+ "]"); + assertEquals(ex.getMessage(), + "cannot set discovery.zen.minimum_master_nodes to more than the current master nodes count [" +updateCount+ "]"); } logger.info("--> verifying no node left and master is up"); @@ -411,7 +424,8 @@ public void onFailure(String source, Exception e) { for (String node : internalCluster().getNodeNames()) { Settings nodeSetting = internalCluster().clusterService(node).state().metaData().settings(); - assertThat(node + " processed the cluster state despite of a min master node violation", nodeSetting.get("_SHOULD_NOT_BE_THERE_"), nullValue()); + assertThat(node + " processed the cluster state despite of a min master node violation", + nodeSetting.get("_SHOULD_NOT_BE_THERE_"), nullValue()); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java b/server/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java index b8a5e26d5c9ab..231a79f8902cf 100644 --- a/server/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/NoMasterNodeIT.java @@ -109,29 +109,35 @@ public void testNoMasterActions() throws Exception { checkUpdateAction(false, timeout, client().prepareUpdate("test", "type1", "1") .setScript(new Script( - ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, "test script", Collections.emptyMap())).setTimeout(timeout)); + ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, "test script", + Collections.emptyMap())).setTimeout(timeout)); checkUpdateAction(true, timeout, client().prepareUpdate("no_index", "type1", "1") .setScript(new Script( - ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, "test script", Collections.emptyMap())).setTimeout(timeout)); + ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, "test script", + Collections.emptyMap())).setTimeout(timeout)); - checkWriteAction( - client().prepareIndex("test", "type1", "1").setSource(XContentFactory.jsonBuilder().startObject().endObject()).setTimeout(timeout)); + checkWriteAction(client().prepareIndex("test", "type1", "1") + .setSource(XContentFactory.jsonBuilder().startObject().endObject()).setTimeout(timeout)); - checkWriteAction( - client().prepareIndex("no_index", "type1", "1").setSource(XContentFactory.jsonBuilder().startObject().endObject()).setTimeout(timeout)); + checkWriteAction(client().prepareIndex("no_index", "type1", "1") + .setSource(XContentFactory.jsonBuilder().startObject().endObject()).setTimeout(timeout)); BulkRequestBuilder bulkRequestBuilder = client().prepareBulk(); - bulkRequestBuilder.add(client().prepareIndex("test", "type1", 
"1").setSource(XContentFactory.jsonBuilder().startObject().endObject())); - bulkRequestBuilder.add(client().prepareIndex("test", "type1", "2").setSource(XContentFactory.jsonBuilder().startObject().endObject())); + bulkRequestBuilder.add(client().prepareIndex("test", "type1", "1") + .setSource(XContentFactory.jsonBuilder().startObject().endObject())); + bulkRequestBuilder.add(client().prepareIndex("test", "type1", "2") + .setSource(XContentFactory.jsonBuilder().startObject().endObject())); bulkRequestBuilder.setTimeout(timeout); checkWriteAction(bulkRequestBuilder); bulkRequestBuilder = client().prepareBulk(); - bulkRequestBuilder.add(client().prepareIndex("no_index", "type1", "1").setSource(XContentFactory.jsonBuilder().startObject().endObject())); - bulkRequestBuilder.add(client().prepareIndex("no_index", "type1", "2").setSource(XContentFactory.jsonBuilder().startObject().endObject())); + bulkRequestBuilder.add(client().prepareIndex("no_index", "type1", "1") + .setSource(XContentFactory.jsonBuilder().startObject().endObject())); + bulkRequestBuilder.add(client().prepareIndex("no_index", "type1", "2") + .setSource(XContentFactory.jsonBuilder().startObject().endObject())); bulkRequestBuilder.setTimeout(timeout); checkWriteAction(bulkRequestBuilder); @@ -219,7 +225,8 @@ public void testNoMasterActionsWriteMasterBlock() throws Exception { } try { - client().prepareIndex("test1", "type1", "1").setSource(XContentFactory.jsonBuilder().startObject().endObject()).setTimeout(timeout).get(); + client().prepareIndex("test1", "type1", "1") + .setSource(XContentFactory.jsonBuilder().startObject().endObject()).setTimeout(timeout).get(); fail("Expected ClusterBlockException"); } catch (ClusterBlockException e) { assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE)); diff --git a/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java b/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java index 3e27b784e0a10..a68764defea78 100644 --- a/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/SimpleClusterStateIT.java @@ -224,9 +224,11 @@ public void testLargeClusterStatePublishing() throws Exception { .addMapping("type", mapping) .setTimeout("60s").get()); ensureGreen(); // wait for green state, so its both green, and there are no more pending events - MappingMetaData masterMappingMetaData = client().admin().indices().prepareGetMappings("test").setTypes("type").get().getMappings().get("test").get("type"); + MappingMetaData masterMappingMetaData = client().admin().indices() + .prepareGetMappings("test").setTypes("type").get().getMappings().get("test").get("type"); for (Client client : clients()) { - MappingMetaData mappingMetadata = client.admin().indices().prepareGetMappings("test").setTypes("type").setLocal(true).get().getMappings().get("test").get("type"); + MappingMetaData mappingMetadata = client.admin().indices() + .prepareGetMappings("test").setTypes("type").setLocal(true).get().getMappings().get("test").get("type"); assertThat(mappingMetadata.source().string(), equalTo(masterMappingMetaData.source().string())); assertThat(mappingMetadata, equalTo(masterMappingMetaData)); } @@ -368,7 +370,8 @@ public Collection createComponents( if (state.nodes().isLocalNodeElectedMaster()) { if (state.custom("test") == null) { if (installed.compareAndSet(false, true)) { - clusterService.submitStateUpdateTask("install-metadata-custom", new ClusterStateUpdateTask(Priority.URGENT) { + 
clusterService.submitStateUpdateTask("install-metadata-custom", + new ClusterStateUpdateTask(Priority.URGENT) { @Override public ClusterState execute(ClusterState currentState) { diff --git a/server/src/test/java/org/elasticsearch/cluster/SimpleDataNodesIT.java b/server/src/test/java/org/elasticsearch/cluster/SimpleDataNodesIT.java index c2f6c3b64faae..d5a986b8affff 100644 --- a/server/src/test/java/org/elasticsearch/cluster/SimpleDataNodesIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/SimpleDataNodesIT.java @@ -49,7 +49,8 @@ public void testDataNodes() throws Exception { } internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).build()); - assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2").setLocal(true).execute().actionGet().isTimedOut(), equalTo(false)); + assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("2") + .setLocal(true).execute().actionGet().isTimedOut(), equalTo(false)); // still no shard should be allocated try { @@ -62,7 +63,8 @@ public void testDataNodes() throws Exception { // now, start a node data, and see that it gets with shards internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true).build()); - assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("3").setLocal(true).execute().actionGet().isTimedOut(), equalTo(false)); + assertThat(client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("3") + .setLocal(true).execute().actionGet().isTimedOut(), equalTo(false)); IndexResponse indexResponse = client().index(Requests.indexRequest("test").type("type1").id("1") .source(source("1", "test"), XContentType.JSON)).actionGet(); diff --git a/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java b/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java index cb557fa13628a..74d7e5c4ff7e8 100644 --- a/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java @@ -49,70 +49,94 @@ protected Settings nodeSettings(int nodeOrdinal) { public void testSimpleOnlyMasterNodeElection() throws IOException { logger.info("--> start data node / non master node"); - internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false) + internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true) + .put(Node.NODE_MASTER_SETTING.getKey(), false) .put("discovery.initial_state_timeout", "1s")); try { - assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().getMasterNodeId(), nullValue()); + assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms") + .execute().actionGet().getState().nodes().getMasterNodeId(), nullValue()); fail("should not be able to find master"); } catch (MasterNotDiscoveredException e) { // all is well, no master elected } logger.info("--> start master node"); - final String masterNodeName = internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); - assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), 
equalTo(masterNodeName)); - assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); + final String masterNodeName = internalCluster() + .startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); + assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState() + .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); + assertThat(internalCluster().masterClient().admin().cluster().prepareState() + .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); logger.info("--> stop master node"); internalCluster().stopCurrentMasterNode(); try { - assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().getMasterNodeId(), nullValue()); + assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms") + .execute().actionGet().getState().nodes().getMasterNodeId(), nullValue()); fail("should not be able to find master"); } catch (MasterNotDiscoveredException e) { // all is well, no master elected } logger.info("--> start master node"); - final String nextMasterEligibleNodeName = internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); - assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligibleNodeName)); - assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligibleNodeName)); + final String nextMasterEligibleNodeName = internalCluster() + .startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); + assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState() + .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligibleNodeName)); + assertThat(internalCluster().masterClient().admin().cluster().prepareState() + .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligibleNodeName)); } public void testElectOnlyBetweenMasterNodes() throws IOException { logger.info("--> start data node / non master node"); - internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false).put("discovery.initial_state_timeout", "1s")); + internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true) + .put(Node.NODE_MASTER_SETTING.getKey(), false).put("discovery.initial_state_timeout", "1s")); try { - assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms").execute().actionGet().getState().nodes().getMasterNodeId(), nullValue()); + assertThat(client().admin().cluster().prepareState().setMasterNodeTimeout("100ms") + .execute().actionGet().getState().nodes().getMasterNodeId(), nullValue()); fail("should not be able to find master"); } catch (MasterNotDiscoveredException e) { // all is well, no master elected } logger.info("--> start master node (1)"); - final String masterNodeName = internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), 
false).put(Node.NODE_MASTER_SETTING.getKey(), true)); - assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); - assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); + final String masterNodeName = internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false) + .put(Node.NODE_MASTER_SETTING.getKey(), true)); + assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState() + .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); + assertThat(internalCluster().masterClient().admin().cluster().prepareState() + .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); logger.info("--> start master node (2)"); - final String nextMasterEligableNodeName = internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); - assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); - assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); - assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); + final String nextMasterEligableNodeName = internalCluster().startNode(Settings.builder() + .put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); + assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState() + .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); + assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState() + .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); + assertThat(internalCluster().masterClient().admin().cluster().prepareState() + .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(masterNodeName)); logger.info("--> closing master node (1)"); internalCluster().stopCurrentMasterNode(); - assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligableNodeName)); - assertThat(internalCluster().masterClient().admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligableNodeName)); + assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState() + .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligableNodeName)); + assertThat(internalCluster().masterClient().admin().cluster().prepareState() + .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligableNodeName)); } public void testAliasFilterValidation() throws Exception { logger.info("--> start master node / non data"); - internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); + internalCluster().startNode(Settings.builder() + 
.put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); logger.info("--> start data node / non master node"); - internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false)); + internalCluster().startNode(Settings.builder() + .put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false)); - assertAcked(prepareCreate("test").addMapping("type1", "{\"type1\" : {\"properties\" : {\"table_a\" : { \"type\" : \"nested\", " + + assertAcked(prepareCreate("test").addMapping( + "type1", "{\"type1\" : {\"properties\" : {\"table_a\" : { \"type\" : \"nested\", " + "\"properties\" : {\"field_a\" : { \"type\" : \"keyword\" },\"field_b\" :{ \"type\" : \"keyword\" }}}}}}", XContentType.JSON)); - client().admin().indices().prepareAliases().addAlias("test", "a_test", QueryBuilders.nestedQuery("table_a", QueryBuilders.termQuery("table_a.field_b", "y"), ScoreMode.Avg)).get(); + client().admin().indices().prepareAliases().addAlias("test", "a_test", + QueryBuilders.nestedQuery("table_a", QueryBuilders.termQuery("table_a.field_b", "y"), ScoreMode.Avg)).get(); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/UpdateSettingsValidationIT.java b/server/src/test/java/org/elasticsearch/cluster/UpdateSettingsValidationIT.java index a21f61ce8afdb..11cdb8afc4c41 100644 --- a/server/src/test/java/org/elasticsearch/cluster/UpdateSettingsValidationIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/UpdateSettingsValidationIT.java @@ -41,17 +41,21 @@ public void testUpdateSettingsValidation() throws Exception { createIndex("test"); NumShards test = getNumShards("test"); - ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth("test").setWaitForEvents(Priority.LANGUID).setWaitForNodes("3").setWaitForGreenStatus().execute().actionGet(); + ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth("test") + .setWaitForEvents(Priority.LANGUID).setWaitForNodes("3").setWaitForGreenStatus().execute().actionGet(); assertThat(healthResponse.isTimedOut(), equalTo(false)); assertThat(healthResponse.getIndices().get("test").getActiveShards(), equalTo(test.totalNumShards)); - client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.number_of_replicas", 0)).execute().actionGet(); - healthResponse = client().admin().cluster().prepareHealth("test").setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); + client().admin().indices().prepareUpdateSettings("test") + .setSettings(Settings.builder().put("index.number_of_replicas", 0)).execute().actionGet(); + healthResponse = client().admin().cluster().prepareHealth("test") + .setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); assertThat(healthResponse.isTimedOut(), equalTo(false)); assertThat(healthResponse.getIndices().get("test").getActiveShards(), equalTo(test.numPrimaries)); try { - client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.refresh_interval", "")).execute().actionGet(); + client().admin().indices().prepareUpdateSettings("test") + .setSettings(Settings.builder().put("index.refresh_interval", "")).execute().actionGet(); fail(); } catch (IllegalArgumentException ex) { logger.info("Error message: [{}]", ex.getMessage()); diff --git a/server/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsIT.java 
b/server/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsIT.java index 03340e211b41d..450acebc983e9 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsIT.java @@ -111,7 +111,8 @@ public void testClusterUpdateSettingsAcknowledgement() { ClusterUpdateSettingsResponse clusterUpdateSettingsResponse = client().admin().cluster().prepareUpdateSettings() .setTransientSettings(Settings.builder().put("cluster.routing.allocation.exclude._id", excludedNodeId)).get(); assertAcked(clusterUpdateSettingsResponse); - assertThat(clusterUpdateSettingsResponse.getTransientSettings().get("cluster.routing.allocation.exclude._id"), equalTo(excludedNodeId)); + assertThat(clusterUpdateSettingsResponse.getTransientSettings().get("cluster.routing.allocation.exclude._id"), + equalTo(excludedNodeId)); for (Client client : clients()) { ClusterState clusterState = getLocalClusterState(client); @@ -120,9 +121,11 @@ public void testClusterUpdateSettingsAcknowledgement() { for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) { for (ShardRouting shardRouting : indexShardRoutingTable) { assert clusterState.nodes() != null; - if (shardRouting.unassigned() == false && clusterState.nodes().get(shardRouting.currentNodeId()).getId().equals(excludedNodeId)) { - //if the shard is still there it must be relocating and all nodes need to know, since the request was acknowledged - //reroute happens as part of the update settings and we made sure no throttling comes into the picture via settings + if (shardRouting.unassigned() == false && clusterState.nodes() + .get(shardRouting.currentNodeId()).getId().equals(excludedNodeId)) { + // if the shard is still there it must be relocating and all nodes need to know, + // since the request was acknowledged reroute happens as part of the update settings + // and we made sure no throttling comes into the picture via settings assertThat(shardRouting.relocating(), equalTo(true)); } } @@ -154,7 +157,8 @@ public void testClusterUpdateSettingsNoAcknowledgement() { ClusterUpdateSettingsResponse clusterUpdateSettingsResponse = client().admin().cluster().prepareUpdateSettings().setTimeout("0s") .setTransientSettings(Settings.builder().put("cluster.routing.allocation.exclude._id", excludedNodeId)).get(); assertThat(clusterUpdateSettingsResponse.isAcknowledged(), equalTo(false)); - assertThat(clusterUpdateSettingsResponse.getTransientSettings().get("cluster.routing.allocation.exclude._id"), equalTo(excludedNodeId)); + assertThat(clusterUpdateSettingsResponse.getTransientSettings().get("cluster.routing.allocation.exclude._id"), + equalTo(excludedNodeId)); } private static ClusterState getLocalClusterState(Client client) { diff --git a/server/src/test/java/org/elasticsearch/cluster/ack/AckIT.java b/server/src/test/java/org/elasticsearch/cluster/ack/AckIT.java index df97854cc35b0..097dcb1920217 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ack/AckIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/ack/AckIT.java @@ -128,7 +128,8 @@ public void testClusterRerouteNoAcknowledgement() throws InterruptedException { MoveAllocationCommand moveAllocationCommand = getAllocationCommand(); - ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setTimeout("0s").add(moveAllocationCommand).get(); + ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute() + 
.setTimeout("0s").add(moveAllocationCommand).get(); assertThat(clusterRerouteResponse.isAcknowledged(), equalTo(false)); } @@ -146,8 +147,9 @@ public void testClusterRerouteAcknowledgementDryRun() throws InterruptedExceptio assertAcked(client().admin().cluster().prepareReroute().setDryRun(true).add(moveAllocationCommand)); - //testing only on master with the latest cluster state as we didn't make any change thus we cannot guarantee that - //all nodes hold the same cluster state version. We only know there was no need to change anything, thus no need for ack on this update. + // testing only on master with the latest cluster state as we didn't make any change thus + // we cannot guarantee that all nodes hold the same cluster state version. We only know there + // was no need to change anything, thus no need for ack on this update. ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get(); boolean found = false; for (ShardRouting shardRouting : clusterStateResponse.getState().getRoutingNodes().node(moveAllocationCommand.fromNode())) { @@ -176,7 +178,8 @@ public void testClusterRerouteNoAcknowledgementDryRun() throws InterruptedExcept MoveAllocationCommand moveAllocationCommand = getAllocationCommand(); - ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setTimeout("0s").setDryRun(true).add(moveAllocationCommand).get(); + ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setTimeout("0s") + .setDryRun(true).add(moveAllocationCommand).get(); //acknowledged anyway as no changes were made assertThat(clusterRerouteResponse.isAcknowledged(), equalTo(true)); } @@ -219,7 +222,8 @@ public void testIndicesAliasesAcknowledgement() { assertAcked(client().admin().indices().prepareAliases().addAlias("test", "alias")); for (Client client : clients()) { - AliasMetaData aliasMetaData = ((AliasOrIndex.Alias) getLocalClusterState(client).metaData().getAliasAndIndexLookup().get("alias")).getFirstAliasMetaData(); + AliasMetaData aliasMetaData = ((AliasOrIndex.Alias) getLocalClusterState(client) + .metaData().getAliasAndIndexLookup().get("alias")).getFirstAliasMetaData(); assertThat(aliasMetaData.alias(), equalTo("alias")); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java index 01d0c518c1be7..60a5d4a3e3f1f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardFailedClusterStateTaskExecutorTests.java @@ -88,7 +88,8 @@ public void setUp() throws Exception { routingTable = RoutingTable.builder() .addAsNew(metaData.index(INDEX)) .build(); - clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(routingTable).build(); + clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metaData(metaData).routingTable(routingTable).build(); executor = new ShardStateAction.ShardFailedClusterStateTaskExecutor(allocationService, null, logger); } @@ -120,12 +121,13 @@ public void testTriviallySuccessfulTasksBatchedWithFailingTasks() throws Excepti ClusterState currentState = createClusterStateWithStartedShards(reason); List failingTasks = createExistingShards(currentState, reason); List 
nonExistentTasks = createNonExistentShards(currentState, reason); - ShardStateAction.ShardFailedClusterStateTaskExecutor failingExecutor = new ShardStateAction.ShardFailedClusterStateTaskExecutor(allocationService, null, logger) { - @Override - ClusterState applyFailedShards(ClusterState currentState, List failedShards, List staleShards) { - throw new RuntimeException("simulated applyFailedShards failure"); - } - }; + ShardStateAction.ShardFailedClusterStateTaskExecutor failingExecutor = + new ShardStateAction.ShardFailedClusterStateTaskExecutor(allocationService, null, logger) { + @Override + ClusterState applyFailedShards(ClusterState currentState, List failedShards, List staleShards) { + throw new RuntimeException("simulated applyFailedShards failure"); + } + }; List tasks = new ArrayList<>(); tasks.addAll(failingTasks); tasks.addAll(nonExistentTasks); @@ -200,7 +202,8 @@ private ClusterState createClusterStateWithStartedShards(String reason) { private List createExistingShards(ClusterState currentState, String reason) { List shards = new ArrayList<>(); - GroupShardsIterator shardGroups = currentState.routingTable().allAssignedShardsGrouped(new String[] { INDEX }, true); + GroupShardsIterator shardGroups = currentState.routingTable() + .allAssignedShardsGrouped(new String[] { INDEX }, true); for (ShardIterator shardIt : shardGroups) { for (ShardRouting shard : shardIt) { shards.add(shard); @@ -233,11 +236,13 @@ private List createNonExistentShards(ClusterS List existingShards = createExistingShards(currentState, reason); List shardsWithMismatchedAllocationIds = new ArrayList<>(); for (ShardStateAction.FailedShardEntry existingShard : existingShards) { - shardsWithMismatchedAllocationIds.add(new ShardStateAction.FailedShardEntry(existingShard.shardId, UUIDs.randomBase64UUID(), 0L, existingShard.message, existingShard.failure, randomBoolean())); + shardsWithMismatchedAllocationIds.add(new ShardStateAction.FailedShardEntry(existingShard.shardId, + UUIDs.randomBase64UUID(), 0L, existingShard.message, existingShard.failure, randomBoolean())); } List tasks = new ArrayList<>(); - nonExistentShards.forEach(shard -> tasks.add(new ShardStateAction.FailedShardEntry(shard.shardId(), shard.allocationId().getId(), 0L, + nonExistentShards.forEach(shard -> tasks.add( + new ShardStateAction.FailedShardEntry(shard.shardId(), shard.allocationId().getId(), 0L, reason, new CorruptIndexException("simulated", nonExistentIndexUUID), randomBoolean()))); tasks.addAll(shardsWithMismatchedAllocationIds); return tasks; @@ -303,7 +308,8 @@ private static void assertTaskResults( } } - private static List toTasks(ClusterState currentState, List shards, String indexUUID, String message) { + private static List toTasks(ClusterState currentState, List shards, + String indexUUID, String message) { return shards .stream() .map(shard -> new ShardStateAction.FailedShardEntry( diff --git a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java index 64fa51d159a54..4e39e12b7ced7 100644 --- a/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/action/shard/ShardStateActionTests.java @@ -89,7 +89,8 @@ public class ShardStateActionTests extends ESTestCase { private ClusterService clusterService; private static class TestShardStateAction extends ShardStateAction { - TestShardStateAction(Settings settings, ClusterService 
clusterService, TransportService transportService, AllocationService allocationService, RoutingService routingService) { + TestShardStateAction(Settings settings, ClusterService clusterService, TransportService transportService, + AllocationService allocationService, RoutingService routingService) { super(settings, clusterService, transportService, allocationService, routingService, THREAD_POOL); } @@ -106,7 +107,8 @@ public void setOnAfterWaitForNewMasterAndRetry(Runnable onAfterWaitForNewMasterA } @Override - protected void waitForNewMasterAndRetry(String actionName, ClusterStateObserver observer, TransportRequest request, Listener listener, Predicate changePredicate) { + protected void waitForNewMasterAndRetry(String actionName, ClusterStateObserver observer, TransportRequest request, + Listener listener, Predicate changePredicate) { onBeforeWaitForNewMasterAndRetry.run(); super.waitForNewMasterAndRetry(actionName, observer, request, listener, changePredicate); onAfterWaitForNewMasterAndRetry.run(); @@ -359,20 +361,21 @@ public void testNoLongerPrimaryShardException() throws InterruptedException { long primaryTerm = clusterService.state().metaData().index(index).primaryTerm(failedShard.id()); assertThat(primaryTerm, greaterThanOrEqualTo(1L)); - shardStateAction.remoteShardFailed(failedShard.shardId(), failedShard.allocationId().getId(), primaryTerm + 1, randomBoolean(), "test", - getSimulatedFailure(), new ShardStateAction.Listener() { - @Override - public void onSuccess() { - failure.set(null); - latch.countDown(); - } + shardStateAction.remoteShardFailed(failedShard.shardId(), failedShard.allocationId().getId(), + primaryTerm + 1, randomBoolean(), "test", getSimulatedFailure(), + new ShardStateAction.Listener() { + @Override + public void onSuccess() { + failure.set(null); + latch.countDown(); + } - @Override - public void onFailure(Exception e) { - failure.set(e); - latch.countDown(); - } - }); + @Override + public void onFailure(Exception e) { + failure.set(e); + latch.countDown(); + } + }); ShardStateAction.NoLongerPrimaryShardException catastrophicError = new ShardStateAction.NoLongerPrimaryShardException(failedShard.shardId(), "dummy failure"); @@ -445,7 +448,8 @@ public void testRemoteShardFailedConcurrently() throws Exception { for (int i = 0; i < iterationsPerThread; i++) { ShardRouting failedShard = randomFrom(failedShards); shardStateAction.remoteShardFailed(failedShard.shardId(), failedShard.allocationId().getId(), - randomLongBetween(1, Long.MAX_VALUE), randomBoolean(), "test", getSimulatedFailure(), new ShardStateAction.Listener() { + randomLongBetween(1, Long.MAX_VALUE), randomBoolean(), "test", getSimulatedFailure(), + new ShardStateAction.Listener() { @Override public void onSuccess() { notifiedResponses.incrementAndGet(); @@ -523,7 +527,8 @@ public void testShardEntryBWCSerialize() throws Exception { assertThat(failedShardEntry.failure, nullValue()); assertThat(failedShardEntry.markAsStale, equalTo(true)); } - try (StreamInput in = serialize(new FailedShardEntry(shardId, allocationId, 0L, reason, null, false), bwcVersion).streamInput()) { + try (StreamInput in = serialize(new FailedShardEntry(shardId, allocationId, 0L, + reason, null, false), bwcVersion).streamInput()) { in.setVersion(bwcVersion); final StartedShardEntry startedShardEntry = new StartedShardEntry(in); assertThat(startedShardEntry.shardId, equalTo(shardId)); diff --git a/server/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java 
b/server/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java index fbb0fa732f601..fe63d1c39e4ae 100644 --- a/server/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationIT.java @@ -77,7 +77,8 @@ public void testSimpleAwareness() throws Exception { assertThat(awaitBusy( () -> { logger.info("--> waiting for no relocation"); - ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("3").setWaitForNoRelocatingShards(true).get(); + ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID) + .setWaitForGreenStatus().setWaitForNodes("3").setWaitForNoRelocatingShards(true).get(); if (clusterHealth.isTimedOut()) { return false; } @@ -128,7 +129,8 @@ public void testAwarenessZones() throws Exception { .put("index.number_of_replicas", 1)).execute().actionGet(); logger.info("--> waiting for shards to be allocated"); - health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).execute().actionGet(); + health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus() + .setWaitForNoRelocatingShards(true).execute().actionGet(); assertThat(health.isTimedOut(), equalTo(false)); ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState(); @@ -163,7 +165,8 @@ public void testAwarenessZonesIncrementalNodes() throws Exception { client().admin().indices().prepareCreate("test") .setSettings(Settings.builder().put("index.number_of_shards", 5) .put("index.number_of_replicas", 1)).execute().actionGet(); - ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("2").setWaitForNoRelocatingShards(true).execute().actionGet(); + ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID) + .setWaitForGreenStatus().setWaitForNodes("2").setWaitForNoRelocatingShards(true).execute().actionGet(); assertThat(health.isTimedOut(), equalTo(false)); ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState(); ObjectIntHashMap counts = new ObjectIntHashMap<>(); @@ -180,10 +183,12 @@ public void testAwarenessZonesIncrementalNodes() throws Exception { logger.info("--> starting another node in zone 'b'"); String B_1 = internalCluster().startNode(Settings.builder().put(commonSettings).put("node.attr.zone", "b").build()); - health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("3").execute().actionGet(); + health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus() + .setWaitForNodes("3").execute().actionGet(); assertThat(health.isTimedOut(), equalTo(false)); client().admin().cluster().prepareReroute().get(); - health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("3").setWaitForActiveShards(10).setWaitForNoRelocatingShards(true).execute().actionGet(); + health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus() + 
.setWaitForNodes("3").setWaitForActiveShards(10).setWaitForNoRelocatingShards(true).execute().actionGet(); assertThat(health.isTimedOut(), equalTo(false)); clusterState = client().admin().cluster().prepareState().execute().actionGet().getState(); @@ -202,10 +207,12 @@ public void testAwarenessZonesIncrementalNodes() throws Exception { assertThat(counts.get(B_1), equalTo(2)); String noZoneNode = internalCluster().startNode(); - health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("4").execute().actionGet(); + health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus() + .setWaitForNodes("4").execute().actionGet(); assertThat(health.isTimedOut(), equalTo(false)); client().admin().cluster().prepareReroute().get(); - health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("4").setWaitForActiveShards(10).setWaitForNoRelocatingShards(true).execute().actionGet(); + health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus() + .setWaitForNodes("4").setWaitForActiveShards(10).setWaitForNoRelocatingShards(true).execute().actionGet(); assertThat(health.isTimedOut(), equalTo(false)); clusterState = client().admin().cluster().prepareState().execute().actionGet().getState(); @@ -224,9 +231,11 @@ public void testAwarenessZonesIncrementalNodes() throws Exception { assertThat(counts.get(B_0), equalTo(3)); assertThat(counts.get(B_1), equalTo(2)); assertThat(counts.containsKey(noZoneNode), equalTo(false)); - client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put("cluster.routing.allocation.awareness.attributes", "").build()).get(); + client().admin().cluster().prepareUpdateSettings() + .setTransientSettings(Settings.builder().put("cluster.routing.allocation.awareness.attributes", "").build()).get(); - health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNodes("4").setWaitForActiveShards(10).setWaitForNoRelocatingShards(true).execute().actionGet(); + health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus() + .setWaitForNodes("4").setWaitForActiveShards(10).setWaitForNoRelocatingShards(true).execute().actionGet(); assertThat(health.isTimedOut(), equalTo(false)); clusterState = client().admin().cluster().prepareState().execute().actionGet().getState(); diff --git a/server/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java b/server/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java index e7bcce2817c0b..71c9f5a15ba4d 100644 --- a/server/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteIT.java @@ -112,7 +112,8 @@ private void rerouteWithCommands(Settings commonSettings) throws Exception { .setDryRun(true) .execute().actionGet().getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1)); - assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(), equalTo(ShardRoutingState.INITIALIZING)); + assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(), + equalTo(ShardRoutingState.INITIALIZING)); logger.info("--> get the state, verify nothing changed 
because of the dry run"); state = client().admin().cluster().prepareState().execute().actionGet().getState(); @@ -124,15 +125,18 @@ private void rerouteWithCommands(Settings commonSettings) throws Exception { .add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true)) .execute().actionGet().getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1)); - assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(), equalTo(ShardRoutingState.INITIALIZING)); + assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(), + equalTo(ShardRoutingState.INITIALIZING)); - ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet(); + ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID) + .setWaitForYellowStatus().execute().actionGet(); assertThat(healthResponse.isTimedOut(), equalTo(false)); logger.info("--> get the state, verify shard 1 primary allocated"); state = client().admin().cluster().prepareState().execute().actionGet().getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1)); - assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(), equalTo(ShardRoutingState.STARTED)); + assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(), + equalTo(ShardRoutingState.STARTED)); logger.info("--> move shard 1 primary from node1 to node2"); state = client().admin().cluster().prepareReroute() @@ -140,17 +144,21 @@ private void rerouteWithCommands(Settings commonSettings) throws Exception { .add(new MoveAllocationCommand("test", 0, node_1, node_2)) .execute().actionGet().getState(); - assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(), equalTo(ShardRoutingState.RELOCATING)); - assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_2).getId()).iterator().next().state(), equalTo(ShardRoutingState.INITIALIZING)); + assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(), + equalTo(ShardRoutingState.RELOCATING)); + assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_2).getId()).iterator().next().state(), + equalTo(ShardRoutingState.INITIALIZING)); - healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().setWaitForNoRelocatingShards(true).execute().actionGet(); + healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus() + .setWaitForNoRelocatingShards(true).execute().actionGet(); assertThat(healthResponse.isTimedOut(), equalTo(false)); logger.info("--> get the state, verify shard 1 primary moved from node1 to node2"); state = client().admin().cluster().prepareState().execute().actionGet().getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1)); - assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_2).getId()).iterator().next().state(), equalTo(ShardRoutingState.STARTED)); + assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_2).getId()).iterator().next().state(), + equalTo(ShardRoutingState.STARTED)); } public void 
testRerouteWithAllocateLocalGateway_disableAllocationSettings() throws Exception { @@ -223,17 +231,21 @@ private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exc .add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true)) .execute().actionGet().getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1)); - assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(), equalTo(ShardRoutingState.INITIALIZING)); + assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(), + equalTo(ShardRoutingState.INITIALIZING)); - healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet(); + healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID) + .setWaitForYellowStatus().execute().actionGet(); assertThat(healthResponse.isTimedOut(), equalTo(false)); logger.info("--> get the state, verify shard 1 primary allocated"); state = client().admin().cluster().prepareState().execute().actionGet().getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1)); - assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(), equalTo(ShardRoutingState.STARTED)); + assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(), + equalTo(ShardRoutingState.STARTED)); - client().prepareIndex("test", "type", "1").setSource("field", "value").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); + client().prepareIndex("test", "type", "1").setSource("field", "value") + .setRefreshPolicy(RefreshPolicy.IMMEDIATE).get(); final Index index = resolveIndex("test"); logger.info("--> closing all nodes"); @@ -251,14 +263,16 @@ private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exc // wait a bit for the cluster to realize that the shard is not there... // TODO can we get around this? the cluster is RED, so what do we wait for? 
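The recurring fix throughout ClusterRerouteIT is the same two-line split of an overlong assertion: the long actual-value expression keeps the first line and the Hamcrest matcher moves to an indented continuation line. A minimal stand-alone sketch of that shape, assuming only that Hamcrest is on the classpath; the class and variable names below are illustrative, not taken from the patch:

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;

public class WrappedAssertionSketch {
    public static void main(String[] args) {
        // Stand-in for a long expression such as
        // state.getRoutingNodes().node(...).iterator().next().state().
        String shardState = "STARTED";

        // Wrapping shape used in the hunks above: the actual value stays on the
        // first line, the matcher argument moves to an indented continuation line.
        assertThat(shardState,
            equalTo("STARTED"));
    }
}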
client().admin().cluster().prepareReroute().get(); - assertThat(client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet().getStatus(), equalTo(ClusterHealthStatus.RED)); + assertThat(client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet().getStatus(), + equalTo(ClusterHealthStatus.RED)); logger.info("--> explicitly allocate primary"); state = client().admin().cluster().prepareReroute() .setExplain(randomBoolean()) .add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true)) .execute().actionGet().getState(); assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1)); - assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(), equalTo(ShardRoutingState.INITIALIZING)); + assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(), + equalTo(ShardRoutingState.INITIALIZING)); logger.info("--> get the state, verify shard 1 primary allocated"); final String nodeToCheck = node_1; @@ -393,7 +407,8 @@ public void testClusterRerouteWithBlocks() throws Exception { List nodesIds = internalCluster().startNodes(2); logger.info("--> create an index with 1 shard and 0 replicas"); - assertAcked(prepareCreate("test-blocks").setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))); + assertAcked(prepareCreate("test-blocks").setSettings(Settings.builder().put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0))); ensureGreen("test-blocks"); logger.info("--> check that the index has 1 shard"); @@ -417,10 +432,11 @@ public void testClusterRerouteWithBlocks() throws Exception { SETTING_READ_ONLY_ALLOW_DELETE)) { try { enableIndexBlock("test-blocks", blockSetting); - assertAcked(client().admin().cluster().prepareReroute() - .add(new MoveAllocationCommand("test-blocks", 0, nodesIds.get(toggle % 2), nodesIds.get(++toggle % 2)))); + assertAcked(client().admin().cluster().prepareReroute().add(new MoveAllocationCommand("test-blocks", 0, + nodesIds.get(toggle % 2), nodesIds.get(++toggle % 2)))); - ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForYellowStatus().setWaitForNoRelocatingShards(true).execute().actionGet(); + ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForYellowStatus() + .setWaitForNoRelocatingShards(true).execute().actionGet(); assertThat(healthResponse.isTimedOut(), equalTo(false)); } finally { disableIndexBlock("test-blocks", blockSetting); diff --git a/server/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java b/server/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java index c3d1a6040a8f5..90ba6cbf89066 100644 --- a/server/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java @@ -65,7 +65,8 @@ public void testDecommissionNodeNoReplicas() throws Exception { client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value" + i).execute().actionGet(); } client().admin().indices().prepareRefresh().execute().actionGet(); - assertThat(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(100L)); + assertThat(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet() + 
.getHits().getTotalHits(), equalTo(100L)); logger.info("--> decommission the second node"); client().admin().cluster().prepareUpdateSettings() @@ -84,7 +85,8 @@ public void testDecommissionNodeNoReplicas() throws Exception { } client().admin().indices().prepareRefresh().execute().actionGet(); - assertThat(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(100L)); + assertThat(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()) + .execute().actionGet().getHits().getTotalHits(), equalTo(100L)); } public void testDisablingAllocationFiltering() throws Exception { @@ -106,7 +108,8 @@ public void testDisablingAllocationFiltering() throws Exception { client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value" + i).execute().actionGet(); } client().admin().indices().prepareRefresh().execute().actionGet(); - assertThat(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet().getHits().getTotalHits(), equalTo(100L)); + assertThat(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()) + .execute().actionGet().getHits().getTotalHits(), equalTo(100L)); ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState(); IndexRoutingTable indexRoutingTable = clusterState.routingTable().index("test"); int numShardsOnNode1 = 0; @@ -120,9 +123,10 @@ public void testDisablingAllocationFiltering() throws Exception { if (numShardsOnNode1 > ThrottlingAllocationDecider.DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES) { client().admin().cluster().prepareUpdateSettings() - .setTransientSettings(Settings.builder().put("cluster.routing.allocation.node_concurrent_recoveries", numShardsOnNode1)).execute().actionGet(); - // make sure we can recover all the nodes at once otherwise we might run into a state where one of the shards has not yet started relocating - // but we already fired up the request to wait for 0 relocating shards. + .setTransientSettings(Settings.builder() + .put("cluster.routing.allocation.node_concurrent_recoveries", numShardsOnNode1)).execute().actionGet(); + // make sure we can recover all the nodes at once otherwise we might run into a state where + // one of the shards has not yet started relocating but we already fired up the request to wait for 0 relocating shards. 
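The same length limit drives the builder and call-chain reflows in FilteringAllocationIT: once a fluent chain no longer fits on one line, each continuation line starts with the '.' of the next call, indented one level, so the chain reads top to bottom. A JDK-only sketch of that convention (it deliberately uses java.util.stream rather than the Elasticsearch client, so none of the names here come from the patch):

import java.util.List;
import java.util.Locale;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public class ChainWrapSketch {
    public static void main(String[] args) {
        // Continuation lines begin with '.', mirroring the reflowed
        // prepareHealth()/prepareUpdateSettings() chains in the hunks above.
        List<String> nodes = Stream.of("node_1", "node_2", "node_3")
            .map(name -> name.toUpperCase(Locale.ROOT))
            .filter(name -> name.endsWith("3") == false)
            .collect(Collectors.toList());
        System.out.println(nodes); // [NODE_1, NODE_2]
    }
}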
} logger.info("--> remove index from the first node"); client().admin().indices().prepareUpdateSettings("test") diff --git a/server/src/test/java/org/elasticsearch/cluster/allocation/SimpleAllocationIT.java b/server/src/test/java/org/elasticsearch/cluster/allocation/SimpleAllocationIT.java index 28aad61367d7f..f9c0691576f2b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/allocation/SimpleAllocationIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/allocation/SimpleAllocationIT.java @@ -54,7 +54,8 @@ public void testSaneAllocation() { assertThat(node.size(), equalTo(2)); } } - client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(SETTING_NUMBER_OF_REPLICAS, 0)).execute().actionGet(); + client().admin().indices().prepareUpdateSettings("test") + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_REPLICAS, 0)).execute().actionGet(); ensureGreen(); state = client().admin().cluster().prepareState().execute().actionGet().getState(); @@ -69,7 +70,8 @@ public void testSaneAllocation() { assertAcked(prepareCreate("test2", 3)); ensureGreen(); - client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(SETTING_NUMBER_OF_REPLICAS, 1)).execute().actionGet(); + client().admin().indices().prepareUpdateSettings("test") + .setSettings(Settings.builder().put(SETTING_NUMBER_OF_REPLICAS, 1)).execute().actionGet(); ensureGreen(); state = client().admin().cluster().prepareState().execute().actionGet().getState(); diff --git a/server/src/test/java/org/elasticsearch/cluster/health/ClusterIndexHealthTests.java b/server/src/test/java/org/elasticsearch/cluster/health/ClusterIndexHealthTests.java index 851ab63297a21..e18b9ca398fc7 100644 --- a/server/src/test/java/org/elasticsearch/cluster/health/ClusterIndexHealthTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/health/ClusterIndexHealthTests.java @@ -134,19 +134,19 @@ protected ClusterIndexHealth mutateInstance(ClusterIndexHealth instance) throws instance.getActivePrimaryShards(), instance.getStatus(), instance.getShards()); case "numberOfReplicas": return new ClusterIndexHealth(instance.getIndex(), instance.getNumberOfShards(), - instance.getNumberOfReplicas() + between(1, 10), instance.getActiveShards(), instance.getRelocatingShards(), - instance.getInitializingShards(), instance.getUnassignedShards(), + instance.getNumberOfReplicas() + between(1, 10), instance.getActiveShards(), + instance.getRelocatingShards(), instance.getInitializingShards(), instance.getUnassignedShards(), instance.getActivePrimaryShards(), instance.getStatus(), instance.getShards()); case "activeShards": return new ClusterIndexHealth(instance.getIndex(), instance.getNumberOfShards(), - instance.getNumberOfReplicas(), instance.getActiveShards() + between(1, 10), instance.getRelocatingShards(), - instance.getInitializingShards(), instance.getUnassignedShards(), + instance.getNumberOfReplicas(), instance.getActiveShards() + between(1, 10), + instance.getRelocatingShards(), instance.getInitializingShards(), instance.getUnassignedShards(), instance.getActivePrimaryShards(), instance.getStatus(), instance.getShards()); case "relocatingShards": return new ClusterIndexHealth(instance.getIndex(), instance.getNumberOfShards(), - instance.getNumberOfReplicas(), instance.getActiveShards(), instance.getRelocatingShards() + between(1, 10), - instance.getInitializingShards(), instance.getUnassignedShards(), - instance.getActivePrimaryShards(), instance.getStatus(), instance.getShards()); + 
instance.getNumberOfReplicas(), instance.getActiveShards(), + instance.getRelocatingShards() + between(1, 10), instance.getInitializingShards(), + instance.getUnassignedShards(), instance.getActivePrimaryShards(), instance.getStatus(), instance.getShards()); case "initializingShards": return new ClusterIndexHealth(instance.getIndex(), instance.getNumberOfShards(), instance.getNumberOfReplicas(), instance.getActiveShards(), instance.getRelocatingShards(), diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/AutoExpandReplicasTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/AutoExpandReplicasTests.java index f24dbfbd002ca..b96d5eacb15d1 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/AutoExpandReplicasTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/AutoExpandReplicasTests.java @@ -52,7 +52,8 @@ public class AutoExpandReplicasTests extends ESTestCase { public void testParseSettings() { - AutoExpandReplicas autoExpandReplicas = AutoExpandReplicas.SETTING.get(Settings.builder().put("index.auto_expand_replicas", "0-5").build()); + AutoExpandReplicas autoExpandReplicas = AutoExpandReplicas.SETTING + .get(Settings.builder().put("index.auto_expand_replicas", "0-5").build()); assertEquals(0, autoExpandReplicas.getMinReplicas()); assertEquals(5, autoExpandReplicas.getMaxReplicas(8)); assertEquals(2, autoExpandReplicas.getMaxReplicas(3)); @@ -133,7 +134,7 @@ public void testAutoExpandWhenNodeLeavesAndPossiblyRejoins() throws InterruptedE dataNodes.add(createNode(DiscoveryNode.Role.DATA)); } allNodes.addAll(dataNodes); - ClusterState state = ClusterStateCreationUtils.state(localNode, localNode, allNodes.toArray(new DiscoveryNode[allNodes.size()])); + ClusterState state = ClusterStateCreationUtils.state(localNode, localNode, allNodes.toArray(new DiscoveryNode[0])); CreateIndexRequest request = new CreateIndexRequest("index", Settings.builder() @@ -173,7 +174,8 @@ public void testAutoExpandWhenNodeLeavesAndPossiblyRejoins() throws InterruptedE .map(DiscoveryNode::getId).collect(Collectors.toSet()); List nodesToAdd = conflictingNodes.stream() - .map(n -> new DiscoveryNode(n.getName(), n.getId(), buildNewFakeTransportAddress(), n.getAttributes(), n.getRoles(), n.getVersion())) + .map(n -> new DiscoveryNode(n.getName(), n.getId(), buildNewFakeTransportAddress(), + n.getAttributes(), n.getRoles(), n.getVersion())) .collect(Collectors.toList()); if (randomBoolean()) { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java index b06609296487b..82a38ab331d8a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java @@ -63,9 +63,12 @@ public void testExpression() throws Exception { List indexExpressions = Arrays.asList("<.marvel-{now}>", "<.watch_history-{now}>", ""); List result = expressionResolver.resolve(context, indexExpressions); assertThat(result.size(), equalTo(3)); - assertThat(result.get(0), equalTo(".marvel-" + DateTimeFormat.forPattern("YYYY.MM.dd").print(new DateTime(context.getStartTime(), UTC)))); - assertThat(result.get(1), equalTo(".watch_history-" + DateTimeFormat.forPattern("YYYY.MM.dd").print(new DateTime(context.getStartTime(), UTC)))); - assertThat(result.get(2), equalTo("logstash-" + 
DateTimeFormat.forPattern("YYYY.MM.dd").print(new DateTime(context.getStartTime(), UTC)))); + assertThat(result.get(0), + equalTo(".marvel-" + DateTimeFormat.forPattern("YYYY.MM.dd").print(new DateTime(context.getStartTime(), UTC)))); + assertThat(result.get(1), + equalTo(".watch_history-" + DateTimeFormat.forPattern("YYYY.MM.dd").print(new DateTime(context.getStartTime(), UTC)))); + assertThat(result.get(2), + equalTo("logstash-" + DateTimeFormat.forPattern("YYYY.MM.dd").print(new DateTime(context.getStartTime(), UTC)))); } public void testEmpty() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java index 0832df7c896d9..2801f9abdd5d9 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java @@ -436,42 +436,48 @@ public void testIndexOptionsSingleIndexNoExpandWildcards() { //error on both unavailable and no indices + every alias needs to expand to a single index { - IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed()); + IndexNameExpressionResolver.Context context = + new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed()); IndexNotFoundException infe = expectThrows(IndexNotFoundException.class, () -> indexNameExpressionResolver.concreteIndexNames(context, "baz*")); assertThat(infe.getIndex().getName(), equalTo("baz*")); } { - IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed()); + IndexNameExpressionResolver.Context context = + new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed()); IndexNotFoundException infe = expectThrows(IndexNotFoundException.class, () -> indexNameExpressionResolver.concreteIndexNames(context, "foo", "baz*")); assertThat(infe.getIndex().getName(), equalTo("baz*")); } { - IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed()); + IndexNameExpressionResolver.Context context = + new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed()); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> indexNameExpressionResolver.concreteIndexNames(context, "foofoobar")); assertThat(e.getMessage(), containsString("Alias [foofoobar] has more than one indices associated with it")); } { - IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed()); + IndexNameExpressionResolver.Context context = + new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed()); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> indexNameExpressionResolver.concreteIndexNames(context, "foo", "foofoobar")); assertThat(e.getMessage(), containsString("Alias [foofoobar] has more than one indices associated with it")); } { - IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed()); + 
IndexNameExpressionResolver.Context context = + new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed()); IndexClosedException ince = expectThrows(IndexClosedException.class, () -> indexNameExpressionResolver.concreteIndexNames(context, "foofoo-closed", "foofoobar")); assertThat(ince.getMessage(), equalTo("closed")); assertEquals(ince.getIndex().getName(), "foofoo-closed"); } - IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed()); + IndexNameExpressionResolver.Context context = + new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed()); String[] results = indexNameExpressionResolver.concreteIndexNames(context, "foo", "barbaz"); assertEquals(2, results.length); assertThat(results, arrayContainingInAnyOrder("foo", "foofoo")); @@ -501,7 +507,8 @@ public void testIndexOptionsEmptyCluster() { } - final IndexNameExpressionResolver.Context context2 = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen()); + final IndexNameExpressionResolver.Context context2 = + new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen()); results = indexNameExpressionResolver.concreteIndexNames(context2, Strings.EMPTY_ARRAY); assertThat(results, emptyArray()); results = indexNameExpressionResolver.concreteIndexNames(context2, "foo"); @@ -511,14 +518,16 @@ public void testIndexOptionsEmptyCluster() { results = indexNameExpressionResolver.concreteIndexNames(context2, "foo*", "bar"); assertThat(results, emptyArray()); - final IndexNameExpressionResolver.Context context3 = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, false, true, false)); + final IndexNameExpressionResolver.Context context3 = + new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, false, true, false)); IndexNotFoundException infe = expectThrows(IndexNotFoundException.class, () -> indexNameExpressionResolver.concreteIndexNames(context3, Strings.EMPTY_ARRAY)); assertThat(infe.getResourceId().toString(), equalTo("[_all]")); } private static IndexMetaData.Builder indexBuilder(String index) { - return IndexMetaData.builder(index).settings(settings(Version.CURRENT).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); + return IndexMetaData.builder(index).settings(settings(Version.CURRENT).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); } public void testConcreteIndicesIgnoreIndicesOneMissingIndex() { @@ -540,7 +549,8 @@ public void testConcreteIndicesIgnoreIndicesOneMissingIndexOtherFound() { ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen()); - assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testXXX", "testZZZ")), equalTo(newHashSet("testXXX"))); + assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testXXX", "testZZZ")), + equalTo(newHashSet("testXXX"))); } public void testConcreteIndicesIgnoreIndicesAllMissing() { @@ -561,7 +571,8 @@ public void testConcreteIndicesIgnoreIndicesEmptyRequest() { .put(indexBuilder("kuku")); ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); 
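In IndexNameExpressionResolverTests the repeated pattern is an assignment whose right-hand side, a new IndexNameExpressionResolver.Context built from an IndicesOptions factory call, is itself too long, so the patch breaks after '=' and indents the whole expression on the next line. A small stand-alone illustration of that shape using plain JDK types; the names are assumptions for the example only:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class AssignmentWrapSketch {
    public static void main(String[] args) {
        // When the declaration plus the constructor call exceed the line limit,
        // the declaration keeps the first line and the entire right-hand side
        // moves to the next line, indented past the '='.
        Set<String> resolvedIndices =
            new HashSet<>(Arrays.asList("testXXX", "testXXY", "testXYY"));
        System.out.println(resolvedIndices.size()); // 3
    }
}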
IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen()); - assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, new String[]{})), equalTo(newHashSet("kuku", "testXXX"))); + assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, new String[]{})), + equalTo(newHashSet("kuku", "testXXX"))); } public void testConcreteIndicesWildcardExpansion() { @@ -573,14 +584,19 @@ public void testConcreteIndicesWildcardExpansion() { .put(indexBuilder("testYYX").state(State.OPEN)); ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); - IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, true, false, false)); - assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testX*")), equalTo(new HashSet())); + IndexNameExpressionResolver.Context context = + new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, true, false, false)); + assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testX*")), + equalTo(new HashSet())); context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, true, true, false)); - assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testX*")), equalTo(newHashSet("testXXX", "testXXY"))); + assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testX*")), + equalTo(newHashSet("testXXX", "testXXY"))); context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, true, false, true)); - assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testX*")), equalTo(newHashSet("testXYY"))); + assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testX*")), + equalTo(newHashSet("testXYY"))); context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, true, true, true)); - assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testX*")), equalTo(newHashSet("testXXX", "testXXY", "testXYY"))); + assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testX*")), + equalTo(newHashSet("testXXX", "testXXY", "testXYY"))); } public void testConcreteIndicesWildcardWithNegation() { @@ -891,17 +907,20 @@ public void testIsPatternMatchingAllIndicesNonMatchingTrailingWildcardAndExclusi public void testIndexOptionsFailClosedIndicesAndAliases() { MetaData.Builder mdBuilder = MetaData.builder() - .put(indexBuilder("foo1-closed").state(IndexMetaData.State.CLOSE).putAlias(AliasMetaData.builder("foobar1-closed")).putAlias(AliasMetaData.builder("foobar2-closed"))) + .put(indexBuilder("foo1-closed").state(IndexMetaData.State.CLOSE) + .putAlias(AliasMetaData.builder("foobar1-closed")).putAlias(AliasMetaData.builder("foobar2-closed"))) .put(indexBuilder("foo2-closed").state(IndexMetaData.State.CLOSE).putAlias(AliasMetaData.builder("foobar2-closed"))) .put(indexBuilder("foo3").putAlias(AliasMetaData.builder("foobar2-closed"))); ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); - IndexNameExpressionResolver.Context contextICE = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictExpandOpenAndForbidClosed()); + IndexNameExpressionResolver.Context contextICE = + new IndexNameExpressionResolver.Context(state, 
IndicesOptions.strictExpandOpenAndForbidClosed()); expectThrows(IndexClosedException.class, () -> indexNameExpressionResolver.concreteIndexNames(contextICE, "foo1-closed")); expectThrows(IndexClosedException.class, () -> indexNameExpressionResolver.concreteIndexNames(contextICE, "foobar1-closed")); IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, - contextICE.getOptions().allowNoIndices(), contextICE.getOptions().expandWildcardsOpen(), contextICE.getOptions().expandWildcardsClosed(), contextICE.getOptions())); + contextICE.getOptions().allowNoIndices(), contextICE.getOptions().expandWildcardsOpen(), + contextICE.getOptions().expandWildcardsClosed(), contextICE.getOptions())); String[] results = indexNameExpressionResolver.concreteIndexNames(context, "foo1-closed"); assertThat(results, emptyArray()); @@ -926,7 +945,9 @@ public void testIndexOptionsFailClosedIndicesAndAliases() { // expected } - context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, context.getOptions().allowNoIndices(), context.getOptions().expandWildcardsOpen(), context.getOptions().expandWildcardsClosed(), context.getOptions())); + context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, + context.getOptions().allowNoIndices(), context.getOptions().expandWildcardsOpen(), + context.getOptions().expandWildcardsClosed(), context.getOptions())); results = indexNameExpressionResolver.concreteIndexNames(context, "foobar2-closed"); assertThat(results, arrayWithSize(1)); assertThat(results, arrayContaining("foo3")); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java index c2950256884c5..f5ac710510718 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java @@ -154,7 +154,8 @@ public void testIndexAndAliasWithSameName() { MetaData.builder().put(builder).build(); fail("exception should have been thrown"); } catch (IllegalStateException e) { - assertThat(e.getMessage(), equalTo("index and alias names need to be unique, but the following duplicates were found [index (alias of [index])]")); + assertThat(e.getMessage(), + equalTo("index and alias names need to be unique, but the following duplicates were found [index (alias of [index])]")); } } @@ -249,7 +250,8 @@ public void testResolveIndexRouting() { metaData.resolveIndexRouting("0", "alias1"); fail("should fail"); } catch (IllegalArgumentException ex) { - assertThat(ex.getMessage(), is("Alias [alias1] has index routing associated with it [1], and was provided with routing value [0], rejecting operation")); + assertThat(ex.getMessage(), is("Alias [alias1] has index routing associated with it [1], " + + "and was provided with routing value [0], rejecting operation")); } // alias with invalid index routing. 
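Where the overlong token is a single expected-message literal rather than a call chain, as in the resolveIndexRouting assertions on either side of this point, the patch splits the string at a word boundary and rejoins the pieces with '+'. Both operands are compile-time constants, so the compiler folds them back into one literal and the message the assertion compares against is unchanged. A short sketch; the class name and the printed check are illustrative only:

public class SplitLiteralSketch {
    // Adjacent compile-time constant strings joined with '+' are concatenated by
    // the compiler, so wrapping a long expected message across lines does not
    // change the value that reaches the assertion.
    static final String WRAPPED = "index/alias [alias2] provided with routing value [1,2] that"
        + " resolved to several routing values, rejecting operation";

    public static void main(String[] args) {
        String unwrapped =
            "index/alias [alias2] provided with routing value [1,2] that resolved to several routing values, rejecting operation";
        System.out.println(WRAPPED.equals(unwrapped)); // true
    }
}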
@@ -257,14 +259,16 @@ public void testResolveIndexRouting() { metaData.resolveIndexRouting(null, "alias2"); fail("should fail"); } catch (IllegalArgumentException ex) { - assertThat(ex.getMessage(), is("index/alias [alias2] provided with routing value [1,2] that resolved to several routing values, rejecting operation")); + assertThat(ex.getMessage(), is("index/alias [alias2] provided with routing value [1,2] that" + + " resolved to several routing values, rejecting operation")); } try { metaData.resolveIndexRouting("1", "alias2"); fail("should fail"); } catch (IllegalArgumentException ex) { - assertThat(ex.getMessage(), is("index/alias [alias2] provided with routing value [1,2] that resolved to several routing values, rejecting operation")); + assertThat(ex.getMessage(), is("index/alias [alias2] provided with routing value [1,2] that" + + " resolved to several routing values, rejecting operation")); } IndexMetaData.Builder builder2 = IndexMetaData.builder("index2") diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java index 3ac55ec663ca0..ae24915e32d52 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetaDataTests.java @@ -302,7 +302,8 @@ public void testSimpleJsonFromAndTo() throws IOException { assertThat(parsedMetaData.templates().get("foo").aliases().size(), equalTo(3)); assertThat(parsedMetaData.templates().get("foo").aliases().get("alias-bar1").alias(), equalTo("alias-bar1")); assertThat(parsedMetaData.templates().get("foo").aliases().get("alias-bar2").alias(), equalTo("alias-bar2")); - assertThat(parsedMetaData.templates().get("foo").aliases().get("alias-bar2").filter().string(), equalTo("{\"term\":{\"user\":\"kimchy\"}}")); + assertThat(parsedMetaData.templates().get("foo").aliases().get("alias-bar2").filter().string(), + equalTo("{\"term\":{\"user\":\"kimchy\"}}")); assertThat(parsedMetaData.templates().get("foo").aliases().get("alias-bar3").alias(), equalTo("alias-bar3")); assertThat(parsedMetaData.templates().get("foo").aliases().get("alias-bar3").indexRouting(), equalTo("routing-bar")); assertThat(parsedMetaData.templates().get("foo").aliases().get("alias-bar3").searchRouting(), equalTo("routing-bar")); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java index cb2913e5820e1..a86dce7c3609a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/WildcardExpressionResolverTests.java @@ -50,11 +50,15 @@ public void testConvertWildcardsJustIndicesTests() { assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("testXXX"))), equalTo(newHashSet("testXXX"))); assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "testYYY"))), equalTo(newHashSet("testXXX", "testYYY"))); assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "ku*"))), equalTo(newHashSet("testXXX", "kuku"))); - assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("test*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); + assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("test*"))), + 
equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("testX*"))), equalTo(newHashSet("testXXX", "testXYY"))); - assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testX*", "kuku"))), equalTo(newHashSet("testXXX", "testXYY", "kuku"))); - assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY", "kuku"))); - assertThat(newHashSet(resolver.resolve(context, Arrays.asList("*", "-kuku"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testX*", "kuku"))), + equalTo(newHashSet("testXXX", "testXYY", "kuku"))); + assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("*"))), + equalTo(newHashSet("testXXX", "testXYY", "testYYY", "kuku"))); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("*", "-kuku"))), + equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "testYYY"))), equalTo(newHashSet("testXXX", "testYYY"))); assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "-testXXX"))), equalTo(newHashSet("testXXX", "-testXXX"))); assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "testY*"))), equalTo(newHashSet("testXXX", "testYYY"))); @@ -71,11 +75,14 @@ public void testConvertWildcardsTests() { IndexNameExpressionResolver.WildcardExpressionResolver resolver = new IndexNameExpressionResolver.WildcardExpressionResolver(); IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen()); - assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testYY*", "alias*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testYY*", "alias*"))), + equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("-kuku"))), equalTo(newHashSet("-kuku"))); assertThat(newHashSet(resolver.resolve(context, Arrays.asList("test*", "-testYYY"))), equalTo(newHashSet("testXXX", "testXYY"))); - assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testX*", "testYYY"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); - assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testYYY", "testX*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testX*", "testYYY"))) + , equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); + assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testYYY", "testX*"))), + equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); } public void testConvertWildcardsOpenClosedIndicesTests() { @@ -89,8 +96,10 @@ public void testConvertWildcardsOpenClosedIndicesTests() { ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); IndexNameExpressionResolver.WildcardExpressionResolver resolver = new IndexNameExpressionResolver.WildcardExpressionResolver(); - IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, true, true, true)); - assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("testX*"))), equalTo(newHashSet("testXXX", "testXXY", "testXYY"))); + IndexNameExpressionResolver.Context 
context = new IndexNameExpressionResolver.Context(state, + IndicesOptions.fromOptions(true, true, true, true)); + assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("testX*"))), + equalTo(newHashSet("testXXX", "testXXY", "testXYY"))); context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, true, false, true)); assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("testX*"))), equalTo(newHashSet("testXYY"))); context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, true, true, false)); @@ -111,10 +120,12 @@ public void testMultipleWildcards() { IndexNameExpressionResolver.WildcardExpressionResolver resolver = new IndexNameExpressionResolver.WildcardExpressionResolver(); IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen()); - assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("test*X*"))), equalTo(newHashSet("testXXX", "testXXY", "testXYY"))); + assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("test*X*"))), + equalTo(newHashSet("testXXX", "testXXY", "testXYY"))); assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("test*X*Y"))), equalTo(newHashSet("testXXY", "testXYY"))); assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("kuku*Y*"))), equalTo(newHashSet("kukuYYY"))); - assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("*Y*"))), equalTo(newHashSet("testXXY", "testXYY", "testYYY", "kukuYYY"))); + assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("*Y*"))), + equalTo(newHashSet("testXXY", "testXYY", "testYYY", "kukuYYY"))); assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("test*Y*X"))).size(), equalTo(0)); assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("*Y*X"))).size(), equalTo(0)); } @@ -128,7 +139,8 @@ public void testAll() { IndexNameExpressionResolver.WildcardExpressionResolver resolver = new IndexNameExpressionResolver.WildcardExpressionResolver(); IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen()); - assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("_all"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); + assertThat(newHashSet(resolver.resolve(context, Collections.singletonList("_all"))), + equalTo(newHashSet("testXXX", "testXYY", "testYYY"))); } public void testResolveAliases() { @@ -141,14 +153,18 @@ public void testResolveAliases() { IndexNameExpressionResolver.WildcardExpressionResolver resolver = new IndexNameExpressionResolver.WildcardExpressionResolver(); // when ignoreAliases option is not set, WildcardExpressionResolver resolves the provided // expressions against the defined indices and aliases - IndicesOptions indicesAndAliasesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), true, false, true, false, false); - IndexNameExpressionResolver.Context indicesAndAliasesContext = new IndexNameExpressionResolver.Context(state, indicesAndAliasesOptions); + IndicesOptions indicesAndAliasesOptions = IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), + true, false, true, false, false); + IndexNameExpressionResolver.Context indicesAndAliasesContext = + new IndexNameExpressionResolver.Context(state, indicesAndAliasesOptions); // ignoreAliases option is set, 
WildcardExpressionResolver throws error when IndicesOptions skipAliasesIndicesOptions = IndicesOptions.fromOptions(true, true, true, false, true, false, true); - IndexNameExpressionResolver.Context skipAliasesLenientContext = new IndexNameExpressionResolver.Context(state, skipAliasesIndicesOptions); + IndexNameExpressionResolver.Context skipAliasesLenientContext = + new IndexNameExpressionResolver.Context(state, skipAliasesIndicesOptions); // ignoreAliases option is set, WildcardExpressionResolver resolves the provided expressions only against the defined indices IndicesOptions errorOnAliasIndicesOptions = IndicesOptions.fromOptions(false, false, true, false, true, false, true); - IndexNameExpressionResolver.Context skipAliasesStrictContext = new IndexNameExpressionResolver.Context(state, errorOnAliasIndicesOptions); + IndexNameExpressionResolver.Context skipAliasesStrictContext = + new IndexNameExpressionResolver.Context(state, errorOnAliasIndicesOptions); { List indices = resolver.resolve(indicesAndAliasesContext, Collections.singletonList("foo_a*")); @@ -242,6 +258,7 @@ public void testMatchesConcreteIndicesWildcardAndAliases() { } private static IndexMetaData.Builder indexBuilder(String index) { - return IndexMetaData.builder(index).settings(settings(Version.CURRENT).put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); + return IndexMetaData.builder(index).settings(settings(Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java index 86dbeabd1d73e..379a72f6b9ffe 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/AllocationIdTests.java @@ -37,7 +37,8 @@ public class AllocationIdTests extends ESTestCase { public void testShardToStarted() { logger.info("-- create unassigned shard"); - ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), true, ExistingStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); + ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), true, + ExistingStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); assertThat(shard.allocationId(), nullValue()); logger.info("-- initialize the shard"); @@ -57,7 +58,8 @@ public void testShardToStarted() { public void testSuccessfulRelocation() { logger.info("-- build started shard"); - ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), true, ExistingStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); + ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), true, + ExistingStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); shard = shard.initialize("node1", null, -1); shard = shard.moveToStarted(); @@ -80,7 +82,8 @@ public void testSuccessfulRelocation() { public void testCancelRelocation() { logger.info("-- build started shard"); - ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), true, ExistingStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); + ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 
0), true, + ExistingStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); shard = shard.initialize("node1", null, -1); shard = shard.moveToStarted(); @@ -100,7 +103,8 @@ public void testCancelRelocation() { public void testMoveToUnassigned() { logger.info("-- build started shard"); - ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), true, ExistingStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); + ShardRouting shard = ShardRouting.newUnassigned(new ShardId("test","_na_", 0), true, + ExistingStoreRecoverySource.INSTANCE, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null)); shard = shard.initialize("node1", null, -1); shard = shard.moveToStarted(); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java b/server/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java index e82dbf4d0e94c..c175624125e50 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/DelayedAllocationIT.java @@ -68,7 +68,8 @@ public void testDelayedAllocationNodeLeavesAndComesBack() throws Exception { ensureGreen("test"); indexRandomData(); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(findNodeWithShard())); - assertBusy(() -> assertThat(client().admin().cluster().prepareState().all().get().getState().getRoutingNodes().unassigned().size() > 0, equalTo(true))); + assertBusy(() -> assertThat(client().admin().cluster().prepareState().all().get().getState() + .getRoutingNodes().unassigned().size() > 0, equalTo(true))); assertThat(client().admin().cluster().prepareHealth().get().getDelayedUnassignedShards(), equalTo(1)); internalCluster().startNode(); // this will use the same data location as the stopped node ensureGreen("test"); @@ -90,7 +91,8 @@ public void testDelayedAllocationTimesOut() throws Exception { ensureGreen("test"); internalCluster().startNode(); // do a second round with longer delay to make sure it happens - assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMillis(100))).get()); + assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder() + .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMillis(100))).get()); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(findNodeWithShard())); ensureGreen("test"); } @@ -109,9 +111,11 @@ public void testDelayedAllocationChangeWithSettingTo100ms() throws Exception { ensureGreen("test"); indexRandomData(); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(findNodeWithShard())); - assertBusy(() -> assertThat(client().admin().cluster().prepareState().all().get().getState().getRoutingNodes().unassigned().size() > 0, equalTo(true))); + assertBusy(() -> assertThat(client().admin().cluster().prepareState().all().get() + .getState().getRoutingNodes().unassigned().size() > 0, equalTo(true))); assertThat(client().admin().cluster().prepareHealth().get().getDelayedUnassignedShards(), equalTo(1)); - assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMillis(100))).get()); + 
assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder() + .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMillis(100))).get()); ensureGreen("test"); assertThat(client().admin().cluster().prepareHealth().get().getDelayedUnassignedShards(), equalTo(0)); } @@ -130,9 +134,11 @@ public void testDelayedAllocationChangeWithSettingTo0() throws Exception { ensureGreen("test"); indexRandomData(); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(findNodeWithShard())); - assertBusy(() -> assertThat(client().admin().cluster().prepareState().all().get().getState().getRoutingNodes().unassigned().size() > 0, equalTo(true))); + assertBusy(() -> assertThat(client().admin().cluster().prepareState().all().get().getState() + .getRoutingNodes().unassigned().size() > 0, equalTo(true))); assertThat(client().admin().cluster().prepareHealth().get().getDelayedUnassignedShards(), equalTo(1)); - assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMillis(0))).get()); + assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder() + .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueMillis(0))).get()); ensureGreen("test"); assertThat(client().admin().cluster().prepareHealth().get().getDelayedUnassignedShards(), equalTo(0)); } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java index 9b2db5b34b1da..f1876eab2ae51 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java @@ -87,7 +87,8 @@ protected Settings nodeSettings(int nodeOrdinal) { } private void createStaleReplicaScenario(String master) throws Exception { - client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); + client().prepareIndex("test", "type1").setSource(jsonBuilder() + .startObject().field("field", "value1").endObject()).get(); refresh(); ClusterState state = client().admin().cluster().prepareState().all().get().getState(); List shards = state.routingTable().allShards("test"); @@ -113,7 +114,8 @@ private void createStaleReplicaScenario(String master) throws Exception { ensureStableCluster(2, master); logger.info("--> index a document into previous replica shard (that is now primary)"); - client(replicaNode).prepareIndex("test", "type1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); + client(replicaNode).prepareIndex("test", "type1").setSource(jsonBuilder() + .startObject().field("field", "value1").endObject()).get(); logger.info("--> shut down node that has new acknowledged document"); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode)); @@ -128,9 +130,11 @@ private void createStaleReplicaScenario(String master) throws Exception { logger.info("--> check that old primary shard does not get promoted to primary again"); // kick reroute and wait for all shard states to be fetched client(master).admin().cluster().prepareReroute().get(); - assertBusy(() -> assertThat(internalCluster().getInstance(GatewayAllocator.class, master).getNumberOfInFlightFetch(), equalTo(0))); + assertBusy(() -> 
assertThat(internalCluster().getInstance(GatewayAllocator.class, master).getNumberOfInFlightFetch(), + equalTo(0))); // kick reroute a second time and check that all shards are unassigned - assertThat(client(master).admin().cluster().prepareReroute().get().getState().getRoutingNodes().unassigned().size(), equalTo(2)); + assertThat(client(master).admin().cluster().prepareReroute().get().getState().getRoutingNodes().unassigned().size(), + equalTo(2)); } public void testDoNotAllowStaleReplicasToBePromotedToPrimary() throws Exception { @@ -138,7 +142,8 @@ public void testDoNotAllowStaleReplicasToBePromotedToPrimary() throws Exception String master = internalCluster().startMasterOnlyNode(Settings.EMPTY); internalCluster().startDataOnlyNodes(2); assertAcked(client().admin().indices().prepareCreate("test") - .setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 1)).get()); + .setSettings(Settings.builder().put("index.number_of_shards", 1) + .put("index.number_of_replicas", 1)).get()); ensureGreen(); createStaleReplicaScenario(master); @@ -163,16 +168,19 @@ public void testFailedAllocationOfStalePrimaryToDataNodeWithNoData() throws Exce internalCluster().stopRandomNode(InternalTestCluster.nameFilter(dataNodeWithShardCopy)); ensureStableCluster(1); - assertThat(client().admin().cluster().prepareState().get().getState().getRoutingTable().index("test").getShards().get(0).primaryShard().unassignedInfo().getReason(), equalTo(UnassignedInfo.Reason.NODE_LEFT)); + assertThat(client().admin().cluster().prepareState().get().getState().getRoutingTable().index("test") + .getShards().get(0).primaryShard().unassignedInfo().getReason(), equalTo(UnassignedInfo.Reason.NODE_LEFT)); logger.info("--> force allocation of stale copy to node that does not have shard copy"); - client().admin().cluster().prepareReroute().add(new AllocateStalePrimaryAllocationCommand("test", 0, dataNodeWithNoShardCopy, true)).get(); + client().admin().cluster().prepareReroute().add(new AllocateStalePrimaryAllocationCommand("test", 0, + dataNodeWithNoShardCopy, true)).get(); logger.info("--> wait until shard is failed and becomes unassigned again"); assertBusy(() -> assertTrue(client().admin().cluster().prepareState().get().getState().toString(), client().admin().cluster().prepareState().get().getState().getRoutingTable().index("test").allPrimaryShardsUnassigned())); - assertThat(client().admin().cluster().prepareState().get().getState().getRoutingTable().index("test").getShards().get(0).primaryShard().unassignedInfo().getReason(), equalTo(UnassignedInfo.Reason.ALLOCATION_FAILED)); + assertThat(client().admin().cluster().prepareState().get().getState().getRoutingTable().index("test") + .getShards().get(0).primaryShard().unassignedInfo().getReason(), equalTo(UnassignedInfo.Reason.ALLOCATION_FAILED)); } public void testForceStaleReplicaToBePromotedToPrimary() throws Exception { @@ -180,7 +188,8 @@ public void testForceStaleReplicaToBePromotedToPrimary() throws Exception { String master = internalCluster().startMasterOnlyNode(Settings.EMPTY); internalCluster().startDataOnlyNodes(2); assertAcked(client().admin().indices().prepareCreate("test") - .setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 1)).get()); + .setSettings(Settings.builder().put("index.number_of_shards", 1) + .put("index.number_of_replicas", 1)).get()); ensureGreen(); Set historyUUIDs = Arrays.stream(client().admin().indices().prepareStats("test").clear().get().getShards()) .map(shard -> 
shard.getCommitStats().getUserData().get(Engine.HISTORY_UUID_KEY)).collect(Collectors.toSet()); @@ -189,7 +198,8 @@ public void testForceStaleReplicaToBePromotedToPrimary() throws Exception { boolean useStaleReplica = randomBoolean(); // if true, use stale replica, otherwise a completely empty copy logger.info("--> explicitly promote old primary shard"); final String idxName = "test"; - ImmutableOpenIntMap> storeStatuses = client().admin().indices().prepareShardStores(idxName).get().getStoreStatuses().get(idxName); + ImmutableOpenIntMap> storeStatuses = client().admin().indices() + .prepareShardStores(idxName).get().getStoreStatuses().get(idxName); ClusterRerouteRequestBuilder rerouteBuilder = client().admin().cluster().prepareReroute(); for (IntObjectCursor> shardStoreStatuses : storeStatuses) { int shardId = shardStoreStatuses.key; @@ -234,9 +244,11 @@ public void testForcePrimaryShardIfAllocationDecidersSayNoAfterIndexCreation() t .put("index.routing.allocation.exclude._name", node) .put("index.number_of_shards", 1).put("index.number_of_replicas", 0)).get(); - assertThat(client().admin().cluster().prepareState().get().getState().getRoutingTable().shardRoutingTable("test", 0).assignedShards(), empty()); + assertThat(client().admin().cluster().prepareState().get().getState().getRoutingTable() + .shardRoutingTable("test", 0).assignedShards(), empty()); - client().admin().cluster().prepareReroute().add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node, true)).get(); + client().admin().cluster().prepareReroute().add( + new AllocateEmptyPrimaryAllocationCommand("test", 0, node, true)).get(); ensureGreen("test"); } @@ -244,12 +256,14 @@ public void testDoNotRemoveAllocationIdOnNodeLeave() throws Exception { internalCluster().startMasterOnlyNode(Settings.EMPTY); internalCluster().startDataOnlyNode(Settings.EMPTY); assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder() - .put("index.number_of_shards", 1).put("index.number_of_replicas", 1).put("index.unassigned.node_left.delayed_timeout", "0ms")).get()); + .put("index.number_of_shards", 1).put("index.number_of_replicas", 1) + .put("index.unassigned.node_left.delayed_timeout", "0ms")).get()); String replicaNode = internalCluster().startDataOnlyNode(Settings.EMPTY); ensureGreen("test"); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode)); ensureYellow("test"); - assertEquals(2, client().admin().cluster().prepareState().get().getState().metaData().index("test").inSyncAllocationIds(0).size()); + assertEquals(2, client().admin().cluster().prepareState().get().getState().metaData().index("test") + .inSyncAllocationIds(0).size()); internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { @Override public boolean clearData(String nodeName) { @@ -257,8 +271,10 @@ public boolean clearData(String nodeName) { } }); logger.info("--> wait until shard is failed and becomes unassigned again"); - assertBusy(() -> assertTrue(client().admin().cluster().prepareState().get().getState().getRoutingTable().index("test").allPrimaryShardsUnassigned())); - assertEquals(2, client().admin().cluster().prepareState().get().getState().metaData().index("test").inSyncAllocationIds(0).size()); + assertBusy(() -> assertTrue(client().admin().cluster().prepareState().get().getState() + .getRoutingTable().index("test").allPrimaryShardsUnassigned())); + assertEquals(2, client().admin().cluster().prepareState().get().getState() + .metaData().index("test").inSyncAllocationIds(0).size()); 
logger.info("--> starting node that reuses data folder with the up-to-date shard"); internalCluster().startDataOnlyNode(Settings.EMPTY); @@ -269,15 +285,19 @@ public void testRemoveAllocationIdOnWriteAfterNodeLeave() throws Exception { internalCluster().startMasterOnlyNode(Settings.EMPTY); internalCluster().startDataOnlyNode(Settings.EMPTY); assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder() - .put("index.number_of_shards", 1).put("index.number_of_replicas", 1).put("index.unassigned.node_left.delayed_timeout", "0ms")).get()); + .put("index.number_of_shards", 1).put("index.number_of_replicas", + 1).put("index.unassigned.node_left.delayed_timeout", "0ms")).get()); String replicaNode = internalCluster().startDataOnlyNode(Settings.EMPTY); ensureGreen("test"); internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode)); ensureYellow("test"); - assertEquals(2, client().admin().cluster().prepareState().get().getState().metaData().index("test").inSyncAllocationIds(0).size()); + assertEquals(2, client().admin().cluster().prepareState().get().getState() + .metaData().index("test").inSyncAllocationIds(0).size()); logger.info("--> indexing..."); - client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); - assertEquals(1, client().admin().cluster().prepareState().get().getState().metaData().index("test").inSyncAllocationIds(0).size()); + client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject() + .field("field", "value1").endObject()).get(); + assertEquals(1, client().admin().cluster().prepareState().get().getState() + .metaData().index("test").inSyncAllocationIds(0).size()); internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { @Override public boolean clearData(String nodeName) { @@ -285,12 +305,15 @@ public boolean clearData(String nodeName) { } }); logger.info("--> wait until shard is failed and becomes unassigned again"); - assertBusy(() -> assertTrue(client().admin().cluster().prepareState().get().getState().getRoutingTable().index("test").allPrimaryShardsUnassigned())); - assertEquals(1, client().admin().cluster().prepareState().get().getState().metaData().index("test").inSyncAllocationIds(0).size()); + assertBusy(() -> assertTrue(client().admin().cluster().prepareState().get().getState() + .getRoutingTable().index("test").allPrimaryShardsUnassigned())); + assertEquals(1, client().admin().cluster().prepareState().get().getState() + .metaData().index("test").inSyncAllocationIds(0).size()); logger.info("--> starting node that reuses data folder with the up-to-date shard"); internalCluster().startDataOnlyNode(Settings.EMPTY); - assertBusy(() -> assertTrue(client().admin().cluster().prepareState().get().getState().getRoutingTable().index("test").allPrimaryShardsUnassigned())); + assertBusy(() -> assertTrue(client().admin().cluster().prepareState().get().getState() + .getRoutingTable().index("test").allPrimaryShardsUnassigned())); } public void testNotWaitForQuorumCopies() throws Exception { @@ -300,7 +323,8 @@ public void testNotWaitForQuorumCopies() throws Exception { assertAcked(client().admin().indices().prepareCreate("test").setSettings(Settings.builder() .put("index.number_of_shards", randomIntBetween(1, 3)).put("index.number_of_replicas", 2)).get()); ensureGreen("test"); - client().prepareIndex("test", "type1").setSource(jsonBuilder().startObject().field("field", "value1").endObject()).get(); + client().prepareIndex("test", 
"type1").setSource(jsonBuilder() + .startObject().field("field", "value1").endObject()).get(); logger.info("--> removing 2 nodes from cluster"); internalCluster().stopRandomDataNode(); internalCluster().stopRandomDataNode(); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java index 0f55270354301..a6c2fab5c91e4 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/RoutingTableTests.java @@ -76,10 +76,13 @@ public void setUp() throws Exception { .build(); RoutingTable testRoutingTable = new RoutingTable.Builder() - .add(new IndexRoutingTable.Builder(metaData.index(TEST_INDEX_1).getIndex()).initializeAsNew(metaData.index(TEST_INDEX_1)).build()) - .add(new IndexRoutingTable.Builder(metaData.index(TEST_INDEX_2).getIndex()).initializeAsNew(metaData.index(TEST_INDEX_2)).build()) + .add(new IndexRoutingTable.Builder(metaData.index(TEST_INDEX_1). + getIndex()).initializeAsNew(metaData.index(TEST_INDEX_1)).build()) + .add(new IndexRoutingTable.Builder(metaData.index(TEST_INDEX_2) + .getIndex()).initializeAsNew(metaData.index(TEST_INDEX_2)).build()) .build(); - this.clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)).metaData(metaData).routingTable(testRoutingTable).build(); + this.clusterState = ClusterState.builder(org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING + .getDefault(Settings.EMPTY)).metaData(metaData).routingTable(testRoutingTable).build(); } /** @@ -99,7 +102,8 @@ private void initPrimaries() { private void startInitializingShards(String index) { logger.info("start primary shards for index {}", index); - this.clusterState = ALLOCATION_SERVICE.applyStartedShards(this.clusterState, this.clusterState.getRoutingNodes().shardsWithState(index, INITIALIZING)); + this.clusterState = ALLOCATION_SERVICE.applyStartedShards(this.clusterState, + this.clusterState.getRoutingNodes().shardsWithState(index, INITIALIZING)); } private IndexMetaData.Builder createIndexMetaData(String indexName) { @@ -142,20 +146,23 @@ public void testShardsWithState() { assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.UNASSIGNED).size(), is(this.totalNumberOfShards)); initPrimaries(); - assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.UNASSIGNED).size(), is(this.totalNumberOfShards - 2 * this.numberOfShards)); + assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.UNASSIGNED).size(), + is(this.totalNumberOfShards - 2 * this.numberOfShards)); assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.INITIALIZING).size(), is(2 * this.numberOfShards)); startInitializingShards(TEST_INDEX_1); assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.STARTED).size(), is(this.numberOfShards)); int initializingExpected = this.numberOfShards + this.numberOfShards * this.numberOfReplicas; assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.INITIALIZING).size(), is(initializingExpected)); - assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.UNASSIGNED).size(), is(this.totalNumberOfShards - initializingExpected - this.numberOfShards)); + assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.UNASSIGNED).size(), + is(this.totalNumberOfShards - initializingExpected - 
this.numberOfShards)); startInitializingShards(TEST_INDEX_2); assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.STARTED).size(), is(2 * this.numberOfShards)); initializingExpected = 2 * this.numberOfShards * this.numberOfReplicas; assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.INITIALIZING).size(), is(initializingExpected)); - assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.UNASSIGNED).size(), is(this.totalNumberOfShards - initializingExpected - 2 * this.numberOfShards)); + assertThat(clusterState.routingTable().shardsWithState(ShardRoutingState.UNASSIGNED).size(), + is(this.totalNumberOfShards - initializingExpected - 2 * this.numberOfShards)); // now start all replicas too startInitializingShards(TEST_INDEX_1); @@ -168,21 +175,29 @@ public void testActivePrimaryShardsGrouped() { assertThat(this.emptyRoutingTable.activePrimaryShardsGrouped(new String[0], false).size(), is(0)); assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(0)); - assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.numberOfShards)); + assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), + is(this.numberOfShards)); initPrimaries(); assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(0)); - assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.numberOfShards)); + assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), + is(this.numberOfShards)); startInitializingShards(TEST_INDEX_1); - assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(this.numberOfShards)); - assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(this.numberOfShards)); - assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.numberOfShards)); + assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), + is(this.numberOfShards)); + assertThat(clusterState.routingTable().activePrimaryShardsGrouped( + new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(this.numberOfShards)); + assertThat(clusterState.routingTable().activePrimaryShardsGrouped( + new String[]{TEST_INDEX_1}, true).size(), is(this.numberOfShards)); startInitializingShards(TEST_INDEX_2); - assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_2}, false).size(), is(this.numberOfShards)); - assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(2 * this.numberOfShards)); - assertThat(clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, true).size(), is(2 * this.numberOfShards)); + assertThat(clusterState.routingTable().activePrimaryShardsGrouped( + new String[]{TEST_INDEX_2}, false).size(), is(this.numberOfShards)); + assertThat(clusterState.routingTable().activePrimaryShardsGrouped( + new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(2 * this.numberOfShards)); + assertThat(clusterState.routingTable().activePrimaryShardsGrouped( + new String[]{TEST_INDEX_1, TEST_INDEX_2}, 
true).size(), is(2 * this.numberOfShards)); try { clusterState.routingTable().activePrimaryShardsGrouped(new String[]{TEST_INDEX_1, "not_exists"}, true); @@ -197,21 +212,29 @@ public void testAllActiveShardsGrouped() { assertThat(this.emptyRoutingTable.allActiveShardsGrouped(new String[0], false).size(), is(0)); assertThat(clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(0)); - assertThat(clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex)); + assertThat(clusterState.routingTable().allActiveShardsGrouped( + new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex)); initPrimaries(); assertThat(clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(0)); - assertThat(clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex)); + assertThat(clusterState.routingTable().allActiveShardsGrouped( + new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex)); startInitializingShards(TEST_INDEX_1); - assertThat(clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(this.numberOfShards)); - assertThat(clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(this.numberOfShards)); - assertThat(clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex)); + assertThat(clusterState.routingTable().allActiveShardsGrouped( + new String[]{TEST_INDEX_1}, false).size(), is(this.numberOfShards)); + assertThat(clusterState.routingTable().allActiveShardsGrouped( + new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(this.numberOfShards)); + assertThat(clusterState.routingTable().allActiveShardsGrouped( + new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex)); startInitializingShards(TEST_INDEX_2); - assertThat(clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_2}, false).size(), is(this.numberOfShards)); - assertThat(clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(2 * this.numberOfShards)); - assertThat(clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, true).size(), is(this.totalNumberOfShards)); + assertThat(clusterState.routingTable().allActiveShardsGrouped( + new String[]{TEST_INDEX_2}, false).size(), is(this.numberOfShards)); + assertThat(clusterState.routingTable().allActiveShardsGrouped( + new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(2 * this.numberOfShards)); + assertThat(clusterState.routingTable().allActiveShardsGrouped( + new String[]{TEST_INDEX_1, TEST_INDEX_2}, true).size(), is(this.totalNumberOfShards)); try { clusterState.routingTable().allActiveShardsGrouped(new String[]{TEST_INDEX_1, "not_exists"}, true); @@ -222,14 +245,19 @@ public void testAllActiveShardsGrouped() { public void testAllAssignedShardsGrouped() { assertThat(clusterState.routingTable().allAssignedShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(0)); - assertThat(clusterState.routingTable().allAssignedShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex)); + assertThat(clusterState.routingTable().allAssignedShardsGrouped( + new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex)); initPrimaries(); - 
assertThat(clusterState.routingTable().allAssignedShardsGrouped(new String[]{TEST_INDEX_1}, false).size(), is(this.numberOfShards)); - assertThat(clusterState.routingTable().allAssignedShardsGrouped(new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex)); + assertThat(clusterState.routingTable().allAssignedShardsGrouped( + new String[]{TEST_INDEX_1}, false).size(), is(this.numberOfShards)); + assertThat(clusterState.routingTable().allAssignedShardsGrouped( + new String[]{TEST_INDEX_1}, true).size(), is(this.shardsPerIndex)); - assertThat(clusterState.routingTable().allAssignedShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(2 * this.numberOfShards)); - assertThat(clusterState.routingTable().allAssignedShardsGrouped(new String[]{TEST_INDEX_1, TEST_INDEX_2}, true).size(), is(this.totalNumberOfShards)); + assertThat(clusterState.routingTable().allAssignedShardsGrouped( + new String[]{TEST_INDEX_1, TEST_INDEX_2}, false).size(), is(2 * this.numberOfShards)); + assertThat(clusterState.routingTable().allAssignedShardsGrouped( + new String[]{TEST_INDEX_1, TEST_INDEX_2}, true).size(), is(this.totalNumberOfShards)); try { clusterState.routingTable().allAssignedShardsGrouped(new String[]{TEST_INDEX_1, "not_exists"}, false); @@ -336,7 +364,8 @@ public void testDistinctNodes() { ShardRouting routing1 = TestShardRouting.newShardRouting(shardId, "node1", randomBoolean(), ShardRoutingState.STARTED); ShardRouting routing2 = TestShardRouting.newShardRouting(shardId, "node2", randomBoolean(), ShardRoutingState.STARTED); ShardRouting routing3 = TestShardRouting.newShardRouting(shardId, "node1", randomBoolean(), ShardRoutingState.STARTED); - ShardRouting routing4 = TestShardRouting.newShardRouting(shardId, "node3", "node2", randomBoolean(), ShardRoutingState.RELOCATING); + ShardRouting routing4 = TestShardRouting.newShardRouting( + shardId, "node3", "node2", randomBoolean(), ShardRoutingState.RELOCATING); assertTrue(IndexShardRoutingTable.Builder.distinctNodes(Arrays.asList(routing1, routing2))); assertFalse(IndexShardRoutingTable.Builder.distinctNodes(Arrays.asList(routing1, routing3))); assertFalse(IndexShardRoutingTable.Builder.distinctNodes(Arrays.asList(routing1, routing2, routing3))); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java index f87f918d99ecc..1216f143686c4 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java @@ -35,8 +35,10 @@ public class ShardRoutingTests extends ESTestCase { public void testIsSameAllocation() { ShardRouting unassignedShard0 = TestShardRouting.newShardRouting("test", 0, null, false, ShardRoutingState.UNASSIGNED); ShardRouting unassignedShard1 = TestShardRouting.newShardRouting("test", 1, null, false, ShardRoutingState.UNASSIGNED); - ShardRouting initializingShard0 = TestShardRouting.newShardRouting("test", 0, "1", randomBoolean(), ShardRoutingState.INITIALIZING); - ShardRouting initializingShard1 = TestShardRouting.newShardRouting("test", 1, "1", randomBoolean(), ShardRoutingState.INITIALIZING); + ShardRouting initializingShard0 = + TestShardRouting.newShardRouting("test", 0, "1", randomBoolean(), ShardRoutingState.INITIALIZING); + ShardRouting initializingShard1 = + TestShardRouting.newShardRouting("test", 1, "1", randomBoolean(), ShardRoutingState.INITIALIZING); ShardRouting startedShard0 = 
initializingShard0.moveToStarted(); ShardRouting startedShard1 = initializingShard1.moveToStarted(); @@ -63,9 +65,12 @@ private ShardRouting randomShardRouting(String index, int shard) { } public void testIsSourceTargetRelocation() { - ShardRouting unassignedShard0 = TestShardRouting.newShardRouting("test", 0, null, false, ShardRoutingState.UNASSIGNED); - ShardRouting initializingShard0 = TestShardRouting.newShardRouting("test", 0, "node1", randomBoolean(), ShardRoutingState.INITIALIZING); - ShardRouting initializingShard1 = TestShardRouting.newShardRouting("test", 1, "node1", randomBoolean(), ShardRoutingState.INITIALIZING); + ShardRouting unassignedShard0 = + TestShardRouting.newShardRouting("test", 0, null, false, ShardRoutingState.UNASSIGNED); + ShardRouting initializingShard0 = + TestShardRouting.newShardRouting("test", 0, "node1", randomBoolean(), ShardRoutingState.INITIALIZING); + ShardRouting initializingShard1 = + TestShardRouting.newShardRouting("test", 1, "node1", randomBoolean(), ShardRoutingState.INITIALIZING); assertFalse(initializingShard0.isRelocationTarget()); ShardRouting startedShard0 = initializingShard0.moveToStarted(); assertFalse(startedShard0.isRelocationTarget()); @@ -126,7 +131,8 @@ public void testEqualsIgnoringVersion() { break; case 1: // change shard id - otherRouting = new ShardRouting(new ShardId(otherRouting.index(), otherRouting.id() + 1), otherRouting.currentNodeId(), otherRouting.relocatingNodeId(), + otherRouting = new ShardRouting(new ShardId(otherRouting.index(), otherRouting.id() + 1), + otherRouting.currentNodeId(), otherRouting.relocatingNodeId(), otherRouting.primary(), otherRouting.state(), otherRouting.recoverySource(), otherRouting.unassignedInfo(), otherRouting.allocationId(), otherRouting.getExpectedShardSize()); break; @@ -135,9 +141,9 @@ public void testEqualsIgnoringVersion() { if (otherRouting.assignedToNode() == false) { unchanged = true; } else { - otherRouting = new ShardRouting(otherRouting.shardId(), otherRouting.currentNodeId() + "_1", otherRouting.relocatingNodeId(), - otherRouting.primary(), otherRouting.state(), otherRouting.recoverySource(), otherRouting.unassignedInfo(), - otherRouting.allocationId(), otherRouting.getExpectedShardSize()); + otherRouting = new ShardRouting(otherRouting.shardId(), otherRouting.currentNodeId() + "_1", + otherRouting.relocatingNodeId(), otherRouting.primary(), otherRouting.state(), otherRouting.recoverySource(), + otherRouting.unassignedInfo(), otherRouting.allocationId(), otherRouting.getExpectedShardSize()); } break; case 3: @@ -145,9 +151,10 @@ public void testEqualsIgnoringVersion() { if (otherRouting.relocating() == false) { unchanged = true; } else { - otherRouting = new ShardRouting(otherRouting.shardId(), otherRouting.currentNodeId(), otherRouting.relocatingNodeId() + "_1", - otherRouting.primary(), otherRouting.state(), otherRouting.recoverySource(), otherRouting.unassignedInfo(), - otherRouting.allocationId(), otherRouting.getExpectedShardSize()); + otherRouting = new ShardRouting(otherRouting.shardId(), otherRouting.currentNodeId(), + otherRouting.relocatingNodeId() + "_1", otherRouting.primary(), otherRouting.state(), + otherRouting.recoverySource(), otherRouting.unassignedInfo(), otherRouting.allocationId(), + otherRouting.getExpectedShardSize()); } break; case 4: @@ -155,16 +162,18 @@ public void testEqualsIgnoringVersion() { if (otherRouting.active() || otherRouting.primary() == false) { unchanged = true; } else { - otherRouting = new ShardRouting(otherRouting.shardId(), 
otherRouting.currentNodeId(), otherRouting.relocatingNodeId(), - otherRouting.primary(), otherRouting.state(), - new RecoverySource.SnapshotRecoverySource(new Snapshot("test", new SnapshotId("s1", UUIDs.randomBase64UUID())), Version.CURRENT, "test"), + otherRouting = new ShardRouting(otherRouting.shardId(), otherRouting.currentNodeId(), + otherRouting.relocatingNodeId(), otherRouting.primary(), otherRouting.state(), + new RecoverySource.SnapshotRecoverySource(new Snapshot("test", + new SnapshotId("s1", UUIDs.randomBase64UUID())), Version.CURRENT, "test"), otherRouting.unassignedInfo(), otherRouting.allocationId(), otherRouting.getExpectedShardSize()); } break; case 5: // change primary flag - otherRouting = TestShardRouting.newShardRouting(otherRouting.getIndexName(), otherRouting.id(), otherRouting.currentNodeId(), otherRouting.relocatingNodeId(), - otherRouting.primary() == false, otherRouting.state(), otherRouting.unassignedInfo()); + otherRouting = TestShardRouting.newShardRouting(otherRouting.getIndexName(), otherRouting.id(), + otherRouting.currentNodeId(), otherRouting.relocatingNodeId(), otherRouting.primary() == false, + otherRouting.state(), otherRouting.unassignedInfo()); break; case 6: // change state @@ -174,12 +183,14 @@ public void testEqualsIgnoringVersion() { } while (newState == otherRouting.state()); UnassignedInfo unassignedInfo = otherRouting.unassignedInfo(); - if (unassignedInfo == null && (newState == ShardRoutingState.UNASSIGNED || newState == ShardRoutingState.INITIALIZING)) { + if (unassignedInfo == null && (newState == ShardRoutingState.UNASSIGNED || + newState == ShardRoutingState.INITIALIZING)) { unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "test"); } otherRouting = TestShardRouting.newShardRouting(otherRouting.getIndexName(), otherRouting.id(), - newState == ShardRoutingState.UNASSIGNED ? null : (otherRouting.currentNodeId() == null ? "1" : otherRouting.currentNodeId()), + newState == ShardRoutingState.UNASSIGNED ? null : + (otherRouting.currentNodeId() == null ? "1" : otherRouting.currentNodeId()), newState == ShardRoutingState.RELOCATING ? "2" : null, otherRouting.primary(), newState, unassignedInfo); break; @@ -187,15 +198,16 @@ public void testEqualsIgnoringVersion() { if (randomBoolean()) { // change unassigned info - otherRouting = TestShardRouting.newShardRouting(otherRouting.getIndexName(), otherRouting.id(), otherRouting.currentNodeId(), otherRouting.relocatingNodeId(), - otherRouting.primary(), otherRouting.state(), + otherRouting = TestShardRouting.newShardRouting(otherRouting.getIndexName(), otherRouting.id(), + otherRouting.currentNodeId(), otherRouting.relocatingNodeId(), otherRouting.primary(), otherRouting.state(), otherRouting.unassignedInfo() == null ? 
new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "test") : - new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, otherRouting.unassignedInfo().getMessage() + "_1")); + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, otherRouting.unassignedInfo().getMessage() + "_1")); } if (unchanged == false) { logger.debug("comparing\nthis {} to\nother {}", routing, otherRouting); - assertFalse("expected non-equality\nthis " + routing + ",\nother " + otherRouting, routing.equalsIgnoringMetaData(otherRouting)); + assertFalse("expected non-equality\nthis " + routing + ",\nother " + otherRouting, + routing.equalsIgnoringMetaData(otherRouting)); } } } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java index d8f7f6552f908..2f213aea32b2c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java @@ -80,8 +80,8 @@ public void testReasonOrdinalOrder() { public void testSerialization() throws Exception { UnassignedInfo.Reason reason = RandomPicks.randomFrom(random(), UnassignedInfo.Reason.values()); UnassignedInfo meta = reason == UnassignedInfo.Reason.ALLOCATION_FAILED ? - new UnassignedInfo(reason, randomBoolean() ? randomAlphaOfLength(4) : null, null, randomIntBetween(1, 100), System.nanoTime(), - System.currentTimeMillis(), false, AllocationStatus.NO_ATTEMPT): + new UnassignedInfo(reason, randomBoolean() ? randomAlphaOfLength(4) : null, null, + randomIntBetween(1, 100), System.nanoTime(), System.currentTimeMillis(), false, AllocationStatus.NO_ATTEMPT): new UnassignedInfo(reason, randomBoolean() ? randomAlphaOfLength(4) : null); BytesStreamOutput out = new BytesStreamOutput(); meta.writeTo(out); @@ -97,7 +97,8 @@ public void testSerialization() throws Exception { public void testIndexCreated() { MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(randomIntBetween(1, 3)).numberOfReplicas(randomIntBetween(0, 3))) + .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 3)).numberOfReplicas(randomIntBetween(0, 3))) .build(); ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) @@ -109,7 +110,8 @@ public void testIndexCreated() { public void testClusterRecovered() { MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(randomIntBetween(1, 3)).numberOfReplicas(randomIntBetween(0, 3))) + .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 3)).numberOfReplicas(randomIntBetween(0, 3))) .build(); ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) @@ -121,7 +123,8 @@ public void testClusterRecovered() { public void testIndexReopened() { MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(randomIntBetween(1, 3)).numberOfReplicas(randomIntBetween(0, 3))) + .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 3)).numberOfReplicas(randomIntBetween(0, 3))) .build(); ClusterState clusterState = 
ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) @@ -133,7 +136,8 @@ public void testIndexReopened() { public void testNewIndexRestored() { MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(randomIntBetween(1, 3)).numberOfReplicas(randomIntBetween(0, 3))) + .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 3)).numberOfReplicas(randomIntBetween(0, 3))) .build(); ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) @@ -145,11 +149,14 @@ public void testNewIndexRestored() { public void testExistingIndexRestored() { MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(randomIntBetween(1, 3)).numberOfReplicas(randomIntBetween(0, 3))) + .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 3)).numberOfReplicas(randomIntBetween(0, 3))) .build(); ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) - .routingTable(RoutingTable.builder().addAsRestore(metaData.index("test"), new SnapshotRecoverySource(new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), Version.CURRENT, "test")).build()).build(); + .routingTable(RoutingTable.builder().addAsRestore(metaData.index("test"), + new SnapshotRecoverySource(new Snapshot("rep1", + new SnapshotId("snp1", UUIDs.randomBase64UUID())), Version.CURRENT, "test")).build()).build(); for (ShardRouting shard : clusterState.getRoutingNodes().shardsWithState(UNASSIGNED)) { assertThat(shard.unassignedInfo().getReason(), equalTo(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED)); } @@ -157,7 +164,8 @@ public void testExistingIndexRestored() { public void testDanglingIndexImported() { MetaData metaData = MetaData.builder() - .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(randomIntBetween(1, 3)).numberOfReplicas(randomIntBetween(0, 3))) + .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 3)).numberOfReplicas(randomIntBetween(0, 3))) .build(); ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) .metaData(metaData) @@ -185,17 +193,20 @@ public void testReplicaAdded() { builder.addIndexShard(indexShardRoutingTable); } builder.addReplica(); - clusterState = ClusterState.builder(clusterState).routingTable(RoutingTable.builder(clusterState.routingTable()).add(builder).build()).build(); + clusterState = ClusterState.builder(clusterState) + .routingTable(RoutingTable.builder(clusterState.routingTable()).add(builder).build()).build(); assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(1)); assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo(), notNullValue()); - assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getReason(), equalTo(UnassignedInfo.Reason.REPLICA_ADDED)); + assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getReason(), + equalTo(UnassignedInfo.Reason.REPLICA_ADDED)); } /** * The unassigned meta is kept when a shard goes to INITIALIZING, but cleared when it moves to STARTED. 
      */
     public void testStateTransitionMetaHandling() {
-        ShardRouting shard = TestShardRouting.newShardRouting("test", 1, null, null, true, ShardRoutingState.UNASSIGNED, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
+        ShardRouting shard = TestShardRouting.newShardRouting("test", 1, null, null,
+            true, ShardRoutingState.UNASSIGNED, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, null));
         assertThat(shard.unassignedInfo(), notNullValue());
         shard = shard.initialize("test_node", null, -1);
         assertThat(shard.state(), equalTo(ShardRoutingState.INITIALIZING));
@@ -216,7 +227,8 @@ public void testNodeLeave() {
         ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
             .metaData(metaData)
             .routingTable(RoutingTable.builder().addAsNew(metaData.index("test")).build()).build();
-        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
+        clusterState = ClusterState.builder(clusterState)
+            .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
         clusterState = allocation.reroute(clusterState, "reroute");
         // starting primaries
         clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
@@ -230,8 +242,10 @@ public void testNodeLeave() {
         assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(true));
         assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(1));
         assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo(), notNullValue());
-        assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getReason(), equalTo(UnassignedInfo.Reason.NODE_LEFT));
-        assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getUnassignedTimeInMillis(), greaterThan(0L));
+        assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getReason(),
+            equalTo(UnassignedInfo.Reason.NODE_LEFT));
+        assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getUnassignedTimeInMillis(),
+            greaterThan(0L));
     }
 
     /**
@@ -245,7 +259,8 @@ public void testFailedShard() {
         ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
             .metaData(metaData)
             .routingTable(RoutingTable.builder().addAsNew(metaData.index("test")).build()).build();
-        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
+        clusterState = ClusterState.builder(clusterState)
+            .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
         clusterState = allocation.reroute(clusterState, "reroute");
         // starting primaries
         clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING));
@@ -254,15 +269,20 @@ public void testFailedShard() {
         assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(false));
         // fail shard
         ShardRouting shardToFail = clusterState.getRoutingNodes().shardsWithState(STARTED).get(0);
-        clusterState = allocation.applyFailedShards(clusterState, Collections.singletonList(new FailedShard(shardToFail, "test fail", null, randomBoolean())));
+        clusterState = allocation.applyFailedShards(clusterState,
+            Collections.singletonList(new FailedShard(shardToFail, "test fail", null, randomBoolean())));
         // verify the reason and details
         assertThat(clusterState.getRoutingNodes().unassigned().size() > 0, equalTo(true));
         assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).size(), equalTo(1));
         assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo(), notNullValue());
-        assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getReason(), equalTo(UnassignedInfo.Reason.ALLOCATION_FAILED));
-        assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getMessage(), equalTo("failed shard on node [" + shardToFail.currentNodeId() + "]: test fail"));
-        assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getDetails(), equalTo("failed shard on node [" + shardToFail.currentNodeId() + "]: test fail"));
-        assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getUnassignedTimeInMillis(), greaterThan(0L));
+        assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getReason(),
+            equalTo(UnassignedInfo.Reason.ALLOCATION_FAILED));
+        assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getMessage(),
+            equalTo("failed shard on node [" + shardToFail.currentNodeId() + "]: test fail"));
+        assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getDetails(),
+            equalTo("failed shard on node [" + shardToFail.currentNodeId() + "]: test fail"));
+        assertThat(clusterState.getRoutingNodes().shardsWithState(UNASSIGNED).get(0).unassignedInfo().getUnassignedTimeInMillis(),
+            greaterThan(0L));
     }
 
     /**
@@ -273,7 +293,8 @@ public void testRemainingDelayCalculation() throws Exception {
         UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, "test", null, 0, baseTime,
             System.currentTimeMillis(), randomBoolean(), AllocationStatus.NO_ATTEMPT);
         final long totalDelayNanos = TimeValue.timeValueMillis(10).nanos();
-        final Settings indexSettings = Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueNanos(totalDelayNanos)).build();
+        final Settings indexSettings = Settings.builder()
+            .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueNanos(totalDelayNanos)).build();
         long delay = unassignedInfo.getRemainingDelay(baseTime, indexSettings);
         assertThat(delay, equalTo(totalDelayNanos));
         long delta1 = randomIntBetween(1, (int) (totalDelayNanos - 1));
@@ -295,7 +316,8 @@ public void testNumberOfDelayedUnassigned() throws Exception {
         ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
             .metaData(metaData)
             .routingTable(RoutingTable.builder().addAsNew(metaData.index("test1")).addAsNew(metaData.index("test2")).build()).build();
-        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
+        clusterState = ClusterState.builder(clusterState)
+            .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
         clusterState = allocation.reroute(clusterState, "reroute");
         assertThat(UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), equalTo(0));
         // starting primaries
@@ -317,13 +339,16 @@ public void testFindNextDelayedAllocation() {
 
         final long expectMinDelaySettingsNanos = Math.min(delayTest1.nanos(), delayTest2.nanos());
         MetaData metaData = MetaData.builder()
-            .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT).put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), delayTest1)).numberOfShards(1).numberOfReplicas(1))
-            .put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT).put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), delayTest2)).numberOfShards(1).numberOfReplicas(1))
+            .put(IndexMetaData.builder("test1").settings(settings(Version.CURRENT).put(
+                UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), delayTest1)).numberOfShards(1).numberOfReplicas(1))
+            .put(IndexMetaData.builder("test2").settings(settings(Version.CURRENT).put(
+                UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), delayTest2)).numberOfShards(1).numberOfReplicas(1))
             .build();
         ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY))
             .metaData(metaData)
             .routingTable(RoutingTable.builder().addAsNew(metaData.index("test1")).addAsNew(metaData.index("test2")).build()).build();
-        clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
+        clusterState = ClusterState.builder(clusterState)
+            .nodes(DiscoveryNodes.builder().add(newNode("node1")).add(newNode("node2"))).build();
         clusterState = allocation.reroute(clusterState, "reroute");
         assertThat(UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), equalTo(0));
         // starting primaries
@@ -344,7 +369,8 @@ public void testFindNextDelayedAllocation() {
             clusterState = allocation.reroute(clusterState, "time moved");
         }
 
-        assertThat(UnassignedInfo.findNextDelayedAllocation(baseTime + delta, clusterState), equalTo(expectMinDelaySettingsNanos - delta));
+        assertThat(UnassignedInfo.findNextDelayedAllocation(baseTime + delta, clusterState),
+            equalTo(expectMinDelaySettingsNanos - delta));
     }
 
     public void testAllocationStatusSerialization() throws IOException {
diff --git a/server/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java b/server/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java
index fdff68510af84..fb3eac28b6793 100644
--- a/server/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java
+++ b/server/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsIT.java
@@ -445,7 +445,8 @@ public void testEngineGCDeletesSetting() throws InterruptedException {
         client().prepareDelete("test", "type", "1").get(); // sets version to 4
         Thread.sleep(300); // wait for cache time to change TODO: this needs to be solved better. To be discussed.
         // delete is should not be in cache
-        assertThrows(client().prepareIndex("test", "type", "1").setSource("f", 3).setVersion(4), VersionConflictEngineException.class);
+        assertThrows(client().prepareIndex("test", "type", "1").setSource("f", 3)
+            .setVersion(4), VersionConflictEngineException.class);
     }