diff --git a/build.gradle b/build.gradle index bb3a0c5754d71..778a62faebb8b 100644 --- a/build.gradle +++ b/build.gradle @@ -25,6 +25,9 @@ import org.elasticsearch.gradle.VersionCollection import org.elasticsearch.gradle.VersionProperties import org.elasticsearch.gradle.plugin.PluginBuildPlugin import org.gradle.plugins.ide.eclipse.model.SourceFolder +import com.carrotsearch.gradle.junit4.RandomizedTestingTask + +import java.util.function.Predicate plugins { id 'com.gradle.build-scan' version '1.13.2' @@ -611,3 +614,19 @@ allprojects { } } } + +allprojects { + task checkPart1 + task checkPart2 + tasks.matching { it.name == "check" }.all { check -> + if (check.path.startsWith(":x-pack:")) { + checkPart2.dependsOn check + } else { + checkPart1.dependsOn check + } + } +} + + + + diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 1bb6b75e2c524..2beecb1922f25 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -443,11 +443,11 @@ class BuildPlugin implements Plugin { // such that we don't have to pass hardcoded files to gradle repos.mavenLocal() } - repos.jcenter() repos.maven { name "elastic" url "https://artifacts.elastic.co/maven" } + repos.jcenter() String luceneVersion = VersionProperties.lucene if (luceneVersion.contains('-snapshot')) { // extract the revision number from the version with a regex matcher diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 8093a76df0e36..be6dea29f6cd9 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -45,90 +45,6 @@ @@ -192,56 +108,12 @@ @@ -314,5 +186,4 @@ diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrClient.java index 68933093ae794..fa0acd762dd75 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrClient.java @@ -23,6 +23,8 @@ import org.elasticsearch.client.ccr.PauseFollowRequest; import org.elasticsearch.client.ccr.PutFollowRequest; import org.elasticsearch.client.ccr.PutFollowResponse; +import org.elasticsearch.client.ccr.ResumeFollowRequest; +import org.elasticsearch.client.ccr.UnfollowRequest; import org.elasticsearch.client.core.AcknowledgedResponse; import java.io.IOException; @@ -89,7 +91,7 @@ public void putFollowAsync(PutFollowRequest request, } /** - * Instructs a follower index the pause the following of a leader index. + * Instructs a follower index to pause the following of a leader index. * * See * the docs for more. @@ -110,7 +112,7 @@ public AcknowledgedResponse pauseFollow(PauseFollowRequest request, RequestOptio } /** - * Asynchronously instruct a follower index the pause the following of a leader index. + * Asynchronously instruct a follower index to pause the following of a leader index. * * See * the docs for more.
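As a usage sketch of the CCR calls documented in the hunks above: it assumes an already-built RestHighLevelClient named client, the cluster and index names are placeholders, and the single-argument PauseFollowRequest constructor is an assumption (only its import is visible in this diff).

import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.ccr.PauseFollowRequest;
import org.elasticsearch.client.ccr.PutFollowRequest;
import org.elasticsearch.client.ccr.PutFollowResponse;
import org.elasticsearch.client.core.AcknowledgedResponse;

import java.io.IOException;

public class CcrFollowExample {
    // Start following a leader index from a remote cluster, then pause replication again.
    static void followAndPause(RestHighLevelClient client) throws IOException {
        PutFollowRequest putFollow = new PutFollowRequest("remote_cluster", "leader_index", "follower_index");
        PutFollowResponse followResponse = client.ccr().putFollow(putFollow, RequestOptions.DEFAULT);

        PauseFollowRequest pauseFollow = new PauseFollowRequest("follower_index"); // assumed constructor
        AcknowledgedResponse pauseResponse = client.ccr().pauseFollow(pauseFollow, RequestOptions.DEFAULT);
        boolean acknowledged = pauseResponse.isAcknowledged();
    }
}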
@@ -130,4 +132,91 @@ public void pauseFollowAsync(PauseFollowRequest request, Collections.emptySet()); } + /** + * Instructs a follower index to resume the following of a leader index. + * + * See + * the docs for more. + * + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public AcknowledgedResponse resumeFollow(ResumeFollowRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity( + request, + CcrRequestConverters::resumeFollow, + options, + AcknowledgedResponse::fromXContent, + Collections.emptySet() + ); + } + + /** + * Asynchronously instruct a follower index to resume the following of a leader index. + * + * See + * the docs for more. + * + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + */ + public void resumeFollowAsync(ResumeFollowRequest request, + RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity( + request, + CcrRequestConverters::resumeFollow, + options, + AcknowledgedResponse::fromXContent, + listener, + Collections.emptySet()); + } + + /** + * Instructs a follower index to unfollow and become a regular index. + * Note that index following needs to be paused and the follower index needs to be closed. + * + * See + * the docs for more. + * + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response + * @throws IOException in case there is a problem sending the request or parsing back the response + */ + public AcknowledgedResponse unfollow(UnfollowRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity( + request, + CcrRequestConverters::unfollow, + options, + AcknowledgedResponse::fromXContent, + Collections.emptySet() + ); + } + + /** + * Asynchronously instructs a follower index to unfollow and become a regular index. + * Note that index following needs to be paused and the follower index needs to be closed. + * + * See + * the docs for more. + * + * @param request the request + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + */ + public void unfollowAsync(UnfollowRequest request, + RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity( + request, + CcrRequestConverters::unfollow, + options, + AcknowledgedResponse::fromXContent, + listener, + Collections.emptySet() + ); + } + } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrRequestConverters.java index eee5715d58629..173c96acc6ef8 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrRequestConverters.java @@ -23,6 +23,8 @@ import org.apache.http.client.methods.HttpPut; import org.elasticsearch.client.ccr.PauseFollowRequest; import org.elasticsearch.client.ccr.PutFollowRequest; +import org.elasticsearch.client.ccr.ResumeFollowRequest; +import org.elasticsearch.client.ccr.UnfollowRequest; import java.io.IOException; @@ -49,4 +51,22 @@ static Request pauseFollow(PauseFollowRequest pauseFollowRequest) { return new Request(HttpPost.METHOD_NAME, endpoint); } + static Request resumeFollow(ResumeFollowRequest resumeFollowRequest) throws IOException { + String endpoint = new RequestConverters.EndpointBuilder() + .addPathPart(resumeFollowRequest.getFollowerIndex()) + .addPathPartAsIs("_ccr", "resume_follow") + .build(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + request.setEntity(createEntity(resumeFollowRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request unfollow(UnfollowRequest unfollowRequest) { + String endpoint = new RequestConverters.EndpointBuilder() + .addPathPart(unfollowRequest.getFollowerIndex()) + .addPathPartAsIs("_ccr", "unfollow") + .build(); + return new Request(HttpPost.METHOD_NAME, endpoint); + } + } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java index bf99c37c81745..224c6b2caf434 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IndexLifecycleClient.java @@ -76,7 +76,7 @@ public void getLifecyclePolicyAsync(GetLifecyclePolicyRequest request, RequestOp } /** - * Create or modify a lifecycle definition See * the docs for more. * @param request the request @@ -91,8 +91,8 @@ public AcknowledgedResponse putLifecyclePolicy(PutLifecyclePolicyRequest request } /** - * Asynchronously create or modify a lifecycle definition - * See + * Asynchronously create or modify a lifecycle definition. See * the docs for more. * @param request the request * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized @@ -282,7 +282,7 @@ public void explainLifecycleAsync(ExplainLifecycleRequest request, RequestOption * @return the response * @throws IOException in case there is a problem sending the request or parsing back the response */ - public AcknowledgedResponse retryLifecycleStep(RetryLifecyclePolicyRequest request, RequestOptions options) throws IOException { + public AcknowledgedResponse retryLifecyclePolicy(RetryLifecyclePolicyRequest request, RequestOptions options) throws IOException { return restHighLevelClient.performRequestAndParseEntity(request, IndexLifecycleRequestConverters::retryLifecycle, options, AcknowledgedResponse::fromXContent, emptySet()); } @@ -295,8 +295,8 @@ public AcknowledgedResponse retryLifecycleStep(RetryLifecyclePolicyRequest reque * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized * @param listener the listener to be notified upon request completion */ - public void retryLifecycleStepAsync(RetryLifecyclePolicyRequest request, RequestOptions options, - ActionListener listener) { + public void retryLifecyclePolicyAsync(RetryLifecyclePolicyRequest request, RequestOptions options, + ActionListener listener) { restHighLevelClient.performRequestAsyncAndParseEntity(request, IndexLifecycleRequestConverters::retryLifecycle, options, AcknowledgedResponse::fromXContent, listener, emptySet()); } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java index 7d916d89c8494..15c2b1617d298 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MLRequestConverters.java @@ -28,6 +28,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.client.RequestConverters.EndpointBuilder; import org.elasticsearch.client.ml.CloseJobRequest; +import org.elasticsearch.client.ml.DeleteCalendarJobRequest; import org.elasticsearch.client.ml.DeleteCalendarRequest; import org.elasticsearch.client.ml.DeleteDatafeedRequest; import org.elasticsearch.client.ml.DeleteFilterRequest; @@ -37,6 +38,7 @@ import org.elasticsearch.client.ml.FlushJobRequest; import org.elasticsearch.client.ml.ForecastJobRequest; import org.elasticsearch.client.ml.GetBucketsRequest; +import org.elasticsearch.client.ml.GetCalendarEventsRequest; import org.elasticsearch.client.ml.GetCalendarsRequest; import org.elasticsearch.client.ml.GetCategoriesRequest; import org.elasticsearch.client.ml.GetDatafeedRequest; @@ -49,17 +51,21 @@ import org.elasticsearch.client.ml.GetOverallBucketsRequest; import org.elasticsearch.client.ml.GetRecordsRequest; import org.elasticsearch.client.ml.OpenJobRequest; +import org.elasticsearch.client.ml.PostCalendarEventRequest; import org.elasticsearch.client.ml.PostDataRequest; import org.elasticsearch.client.ml.PreviewDatafeedRequest; +import org.elasticsearch.client.ml.PutCalendarJobRequest; import org.elasticsearch.client.ml.PutCalendarRequest; import org.elasticsearch.client.ml.PutDatafeedRequest; import org.elasticsearch.client.ml.PutFilterRequest; import org.elasticsearch.client.ml.PutJobRequest; +import org.elasticsearch.client.ml.RevertModelSnapshotRequest; import org.elasticsearch.client.ml.StartDatafeedRequest; import org.elasticsearch.client.ml.StopDatafeedRequest; import 
org.elasticsearch.client.ml.UpdateDatafeedRequest; import org.elasticsearch.client.ml.UpdateFilterRequest; import org.elasticsearch.client.ml.UpdateJobRequest; +import org.elasticsearch.client.ml.UpdateModelSnapshotRequest; import org.elasticsearch.client.ml.job.util.PageParams; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; @@ -390,6 +396,36 @@ static Request getModelSnapshots(GetModelSnapshotsRequest getModelSnapshotsReque return request; } + static Request updateModelSnapshot(UpdateModelSnapshotRequest updateModelSnapshotRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(updateModelSnapshotRequest.getJobId()) + .addPathPartAsIs("model_snapshots") + .addPathPart(updateModelSnapshotRequest.getSnapshotId()) + .addPathPartAsIs("_update") + .build(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + request.setEntity(createEntity(updateModelSnapshotRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request revertModelSnapshot(RevertModelSnapshotRequest revertModelSnapshotsRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("anomaly_detectors") + .addPathPart(revertModelSnapshotsRequest.getJobId()) + .addPathPartAsIs("model_snapshots") + .addPathPart(revertModelSnapshotsRequest.getSnapshotId()) + .addPathPart("_revert") + .build(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + request.setEntity(createEntity(revertModelSnapshotsRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + static Request getOverallBuckets(GetOverallBucketsRequest getOverallBucketsRequest) throws IOException { String endpoint = new EndpointBuilder() .addPathPartAsIs("_xpack") @@ -485,6 +521,30 @@ static Request getCalendars(GetCalendarsRequest getCalendarsRequest) throws IOEx return request; } + static Request putCalendarJob(PutCalendarJobRequest putCalendarJobRequest) { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("calendars") + .addPathPart(putCalendarJobRequest.getCalendarId()) + .addPathPartAsIs("jobs") + .addPathPart(Strings.collectionToCommaDelimitedString(putCalendarJobRequest.getJobIds())) + .build(); + return new Request(HttpPut.METHOD_NAME, endpoint); + } + + static Request deleteCalendarJob(DeleteCalendarJobRequest deleteCalendarJobRequest) { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("calendars") + .addPathPart(deleteCalendarJobRequest.getCalendarId()) + .addPathPartAsIs("jobs") + .addPathPart(Strings.collectionToCommaDelimitedString(deleteCalendarJobRequest.getJobIds())) + .build(); + return new Request(HttpDelete.METHOD_NAME, endpoint); + } + static Request deleteCalendar(DeleteCalendarRequest deleteCalendarRequest) { String endpoint = new EndpointBuilder() .addPathPartAsIs("_xpack") @@ -496,6 +556,34 @@ static Request deleteCalendar(DeleteCalendarRequest deleteCalendarRequest) { return request; } + static Request getCalendarEvents(GetCalendarEventsRequest getCalendarEventsRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("calendars") + .addPathPart(getCalendarEventsRequest.getCalendarId()) + .addPathPartAsIs("events") + .build(); + Request 
request = new Request(HttpGet.METHOD_NAME, endpoint); + request.setEntity(createEntity(getCalendarEventsRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + + static Request postCalendarEvents(PostCalendarEventRequest postCalendarEventRequest) throws IOException { + String endpoint = new EndpointBuilder() + .addPathPartAsIs("_xpack") + .addPathPartAsIs("ml") + .addPathPartAsIs("calendars") + .addPathPart(postCalendarEventRequest.getCalendarId()) + .addPathPartAsIs("events") + .build(); + Request request = new Request(HttpPost.METHOD_NAME, endpoint); + request.setEntity(createEntity(postCalendarEventRequest, + REQUEST_BODY_CONTENT_TYPE, + PostCalendarEventRequest.EXCLUDE_CALENDAR_ID_PARAMS)); + return request; + } + static Request putFilter(PutFilterRequest putFilterRequest) throws IOException { String endpoint = new EndpointBuilder() .addPathPartAsIs("_xpack") diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java index 7455bf4274c73..bba67a792da9e 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/MachineLearningClient.java @@ -22,6 +22,7 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.ml.CloseJobRequest; import org.elasticsearch.client.ml.CloseJobResponse; +import org.elasticsearch.client.ml.DeleteCalendarJobRequest; import org.elasticsearch.client.ml.DeleteCalendarRequest; import org.elasticsearch.client.ml.DeleteDatafeedRequest; import org.elasticsearch.client.ml.DeleteFilterRequest; @@ -35,6 +36,8 @@ import org.elasticsearch.client.ml.ForecastJobResponse; import org.elasticsearch.client.ml.GetBucketsRequest; import org.elasticsearch.client.ml.GetBucketsResponse; +import org.elasticsearch.client.ml.GetCalendarEventsRequest; +import org.elasticsearch.client.ml.GetCalendarEventsResponse; import org.elasticsearch.client.ml.GetCalendarsRequest; import org.elasticsearch.client.ml.GetCalendarsResponse; import org.elasticsearch.client.ml.GetCategoriesRequest; @@ -59,10 +62,13 @@ import org.elasticsearch.client.ml.GetRecordsResponse; import org.elasticsearch.client.ml.OpenJobRequest; import org.elasticsearch.client.ml.OpenJobResponse; +import org.elasticsearch.client.ml.PostCalendarEventRequest; +import org.elasticsearch.client.ml.PostCalendarEventResponse; import org.elasticsearch.client.ml.PostDataRequest; import org.elasticsearch.client.ml.PostDataResponse; import org.elasticsearch.client.ml.PreviewDatafeedRequest; import org.elasticsearch.client.ml.PreviewDatafeedResponse; +import org.elasticsearch.client.ml.PutCalendarJobRequest; import org.elasticsearch.client.ml.PutCalendarRequest; import org.elasticsearch.client.ml.PutCalendarResponse; import org.elasticsearch.client.ml.PutDatafeedRequest; @@ -71,6 +77,8 @@ import org.elasticsearch.client.ml.PutFilterResponse; import org.elasticsearch.client.ml.PutJobRequest; import org.elasticsearch.client.ml.PutJobResponse; +import org.elasticsearch.client.ml.RevertModelSnapshotRequest; +import org.elasticsearch.client.ml.RevertModelSnapshotResponse; import org.elasticsearch.client.ml.StartDatafeedRequest; import org.elasticsearch.client.ml.StartDatafeedResponse; import org.elasticsearch.client.ml.StopDatafeedRequest; @@ -78,6 +86,8 @@ import org.elasticsearch.client.ml.UpdateDatafeedRequest; import 
org.elasticsearch.client.ml.UpdateFilterRequest; import org.elasticsearch.client.ml.UpdateJobRequest; +import org.elasticsearch.client.ml.UpdateModelSnapshotRequest; +import org.elasticsearch.client.ml.UpdateModelSnapshotResponse; import org.elasticsearch.client.ml.job.stats.JobStats; import java.io.IOException; @@ -507,6 +517,47 @@ public void deleteModelSnapshotAsync(DeleteModelSnapshotRequest request, Request Collections.emptySet()); } + /** + * Reverts to a particular Machine Learning Model Snapshot + *

+ * For additional info + * see + * ML Revert Model Snapshot documentation + * + * @param request The request to revert to a previous model snapshot + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return action acknowledgement + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public RevertModelSnapshotResponse revertModelSnapshot(RevertModelSnapshotRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::revertModelSnapshot, + options, + RevertModelSnapshotResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Reverts to a particular Machine Learning Model Snapshot asynchronously and notifies the listener on completion + *

+ * For additional info + * see + * ML Revert Model Snapshot documentation + * + * @param request The request to revert to a previous model snapshot + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void revertModelSnapshotAsync(RevertModelSnapshotRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::revertModelSnapshot, + options, + RevertModelSnapshotResponse::fromXContent, + listener, + Collections.emptySet()); + } + /** * Creates a new Machine Learning Datafeed *

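The revert-model-snapshot methods added in the hunk above can be exercised as below; this is a sketch only, where the job id and snapshot id are placeholders, the two-argument request constructor is an assumption inferred from the converter's getJobId()/getSnapshotId() calls, and client is an existing RestHighLevelClient.

import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.ml.RevertModelSnapshotRequest;
import org.elasticsearch.client.ml.RevertModelSnapshotResponse;

import java.io.IOException;

public class RevertModelSnapshotExample {
    // Revert the job "total-requests" to the snapshot "1541587919".
    static void revert(RestHighLevelClient client) throws IOException {
        RevertModelSnapshotRequest request = new RevertModelSnapshotRequest("total-requests", "1541587919");
        RevertModelSnapshotResponse response =
                client.machineLearning().revertModelSnapshot(request, RequestOptions.DEFAULT);
        // The response wraps the model snapshot the job was reverted to.
    }
}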
@@ -983,6 +1034,47 @@ public void getModelSnapshotsAsync(GetModelSnapshotsRequest request, RequestOpti Collections.emptySet()); } + /** + * Updates a snapshot for a Machine Learning Job. + *

+ * For additional info + * see + * ML UPDATE model snapshots documentation + * + * @param request The request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public UpdateModelSnapshotResponse updateModelSnapshot(UpdateModelSnapshotRequest request, + RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::updateModelSnapshot, + options, + UpdateModelSnapshotResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Updates a snapshot for a Machine Learning Job, notifies listener once the requested snapshots are retrieved. + *

+ * For additional info + * see + * ML UPDATE model snapshots documentation + * + * @param request The request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void updateModelSnapshotAsync(UpdateModelSnapshotRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::updateModelSnapshot, + options, + UpdateModelSnapshotResponse::fromXContent, + listener, + Collections.emptySet()); + } + /** * Gets overall buckets for a set of Machine Learning Jobs. *

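The snapshot-update methods added in the hunk above follow the client's usual sync/async pattern; a sketch of the asynchronous path is shown below. The ids are placeholders, the setDescription and getAcknowledged accessors are assumptions not visible in this diff, and client is an existing RestHighLevelClient.

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.ml.UpdateModelSnapshotRequest;
import org.elasticsearch.client.ml.UpdateModelSnapshotResponse;

public class UpdateModelSnapshotExample {
    static void updateDescription(RestHighLevelClient client) {
        UpdateModelSnapshotRequest request = new UpdateModelSnapshotRequest("total-requests", "1541587919");
        request.setDescription("Snapshot kept after the weekly reindex"); // assumed setter
        client.machineLearning().updateModelSnapshotAsync(request, RequestOptions.DEFAULT,
                new ActionListener<UpdateModelSnapshotResponse>() {
                    @Override
                    public void onResponse(UpdateModelSnapshotResponse response) {
                        Boolean acknowledged = response.getAcknowledged(); // assumed accessor
                    }

                    @Override
                    public void onFailure(Exception e) {
                        // surface or log the failure
                    }
                });
    }
}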
@@ -1217,6 +1309,88 @@ public void putCalendarAsync(PutCalendarRequest request, RequestOptions options, Collections.emptySet()); } + /** + * Adds Machine Learning Job(s) to a calendar + *

+ * For additional info + * see + * ML Put calendar job documentation + * + * @param request The request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return The {@link PutCalendarResponse} containing the updated calendar + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public PutCalendarResponse putCalendarJob(PutCalendarJobRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::putCalendarJob, + options, + PutCalendarResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Adds Machine Learning Job(s) to a calendar, notifies listener when completed + *

+ * For additional info + * see + * ML Put calendar job documentation + * + * @param request The request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void putCalendarJobAsync(PutCalendarJobRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::putCalendarJob, + options, + PutCalendarResponse::fromXContent, + listener, + Collections.emptySet()); + } + + /** + * Removes Machine Learning Job(s) from a calendar + *

+ * For additional info + * see + * ML Delete calendar job documentation + * + * @param request The request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return The {@link PutCalendarResponse} containing the updated calendar + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public PutCalendarResponse deleteCalendarJob(DeleteCalendarJobRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::deleteCalendarJob, + options, + PutCalendarResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Removes Machine Learning Job(s) from a calendar, notifies listener when completed + *

+ * For additional info + * see + * ML Delete calendar job documentation + * + * @param request The request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void deleteCalendarJobAsync(DeleteCalendarJobRequest request, + RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::deleteCalendarJob, + options, + PutCalendarResponse::fromXContent, + listener, + Collections.emptySet()); + } + /** * Deletes the given Machine Learning Calendar *

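The calendar-job methods added in the hunk above pair naturally: putCalendarJob attaches jobs to a calendar and deleteCalendarJob detaches them, both returning the updated calendar. A sketch, assuming varargs request constructors (inferred from the converters' getJobIds() usage), placeholder ids, and an existing RestHighLevelClient:

import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.ml.DeleteCalendarJobRequest;
import org.elasticsearch.client.ml.PutCalendarJobRequest;
import org.elasticsearch.client.ml.PutCalendarResponse;

import java.io.IOException;

public class CalendarJobExample {
    static void linkAndUnlink(RestHighLevelClient client) throws IOException {
        // Attach two jobs to the "holidays" calendar.
        PutCalendarJobRequest addRequest = new PutCalendarJobRequest("holidays", "job-1", "job-2"); // assumed varargs ctor
        PutCalendarResponse afterAdd = client.machineLearning().putCalendarJob(addRequest, RequestOptions.DEFAULT);

        // Detach one of them again; the response again carries the updated calendar.
        DeleteCalendarJobRequest removeRequest = new DeleteCalendarJobRequest("holidays", "job-2"); // assumed varargs ctor
        PutCalendarResponse afterRemove = client.machineLearning().deleteCalendarJob(removeRequest, RequestOptions.DEFAULT);
    }
}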
@@ -1257,6 +1431,88 @@ public void deleteCalendarAsync(DeleteCalendarRequest request, RequestOptions op Collections.emptySet()); } + /** + * Gets the events for a machine learning calendar + *

+ * For additional info + see + GET Calendar Events API + * + * @param request The request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return The {@link GetCalendarEventsResponse} containing the scheduled events + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public GetCalendarEventsResponse getCalendarEvents(GetCalendarEventsRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::getCalendarEvents, + options, + GetCalendarEventsResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Gets the events for a machine learning calendar asynchronously, notifies the listener on completion + *

+ * For additional info + see + GET Calendar Events API + * + * @param request The request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void getCalendarEventsAsync(GetCalendarEventsRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::getCalendarEvents, + options, + GetCalendarEventsResponse::fromXContent, + listener, + Collections.emptySet()); + } + + /** + * Creates new events for a machine learning calendar + *

+ * For additional info + see + Add Events to Calendar API + * + * @param request The request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return The {@link PostCalendarEventResponse} containing the scheduled events + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public PostCalendarEventResponse postCalendarEvent(PostCalendarEventRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + MLRequestConverters::postCalendarEvents, + options, + PostCalendarEventResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Creates new events for a machine learning calendar asynchronously, notifies the listener on completion + *

+ * For additional info + * see + * Add Events to Calendar API + * + * @param request The request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void postCalendarEventAsync(PostCalendarEventRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + MLRequestConverters::postCalendarEvents, + options, + PostCalendarEventResponse::fromXContent, + listener, + Collections.emptySet()); + } + /** * Creates a new Machine Learning Filter *

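The calendar-events methods added in the hunk above can be read back as sketched below; the calendar id is a placeholder, the single-argument request constructor and the count() accessor are assumptions not visible in this diff, and client is an existing RestHighLevelClient.

import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.ml.GetCalendarEventsRequest;
import org.elasticsearch.client.ml.GetCalendarEventsResponse;

import java.io.IOException;

public class CalendarEventsExample {
    static void listEvents(RestHighLevelClient client) throws IOException {
        GetCalendarEventsRequest request = new GetCalendarEventsRequest("holidays"); // assumed constructor
        GetCalendarEventsResponse response =
                client.machineLearning().getCalendarEvents(request, RequestOptions.DEFAULT);
        long count = response.count(); // assumed accessor: number of scheduled events returned
    }
}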
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index 82bd825d51bca..c9d656ad85aa9 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -50,6 +50,7 @@ import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.client.core.CountRequest; +import org.elasticsearch.client.core.MultiTermVectorsRequest; import org.elasticsearch.client.core.TermVectorsRequest; import org.elasticsearch.client.security.RefreshPolicy; import org.elasticsearch.cluster.health.ClusterHealthStatus; @@ -121,7 +122,8 @@ static Request bulk(BulkRequest bulkRequest) throws IOException { Params parameters = new Params(request); parameters.withTimeout(bulkRequest.timeout()); parameters.withRefreshPolicy(bulkRequest.getRefreshPolicy()); - + parameters.withPipeline(bulkRequest.pipeline()); + parameters.withRouting(bulkRequest.routing()); // Bulk API only supports newline delimited JSON or Smile. Before executing // the bulk, we need to check that all requests have the same content-type // and this content-type is supported by the Bulk API. @@ -634,6 +636,13 @@ static Request termVectors(TermVectorsRequest tvrequest) throws IOException { return request; } + static Request mtermVectors(MultiTermVectorsRequest mtvrequest) throws IOException { + String endpoint = "_mtermvectors"; + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + request.setEntity(createEntity(mtvrequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + static Request getScript(GetStoredScriptRequest getStoredScriptRequest) { String endpoint = new EndpointBuilder().addPathPartAsIs("_scripts").addPathPart(getStoredScriptRequest.id()).build(); Request request = new Request(HttpGet.METHOD_NAME, endpoint); @@ -652,7 +661,12 @@ static Request deleteScript(DeleteStoredScriptRequest deleteStoredScriptRequest) } static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) throws IOException { - BytesRef source = XContentHelper.toXContent(toXContent, xContentType, false).toBytesRef(); + return createEntity(toXContent, xContentType, ToXContent.EMPTY_PARAMS); + } + + static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType, ToXContent.Params toXContentParams) + throws IOException { + BytesRef source = XContentHelper.toXContent(toXContent, xContentType, toXContentParams, false).toBytesRef(); return new ByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType)); } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 31a7e1319d960..e87fc4e328f3b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -61,6 +61,8 @@ import org.elasticsearch.client.core.CountResponse; import org.elasticsearch.client.core.TermVectorsResponse; import org.elasticsearch.client.core.TermVectorsRequest; +import org.elasticsearch.client.core.MultiTermVectorsRequest; +import org.elasticsearch.client.core.MultiTermVectorsResponse; import org.elasticsearch.client.tasks.TaskSubmissionResponse; 
import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.CheckedFunction; @@ -1440,6 +1442,37 @@ public final void termvectorsAsync(TermVectorsRequest request, RequestOptions op } + /** + * Calls the Multi Term Vectors API + * + * See Multi Term Vectors API + * on elastic.co + * + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + */ + public final MultiTermVectorsResponse mtermvectors(MultiTermVectorsRequest request, RequestOptions options) throws IOException { + return performRequestAndParseEntity( + request, RequestConverters::mtermVectors, options, MultiTermVectorsResponse::fromXContent, emptySet()); + } + + + /** + * Asynchronously calls the Multi Term Vectors API + * + * See Multi Term Vectors API + * on elastic.co + * @param request the request + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public final void mtermvectorsAsync(MultiTermVectorsRequest request, RequestOptions options, + ActionListener listener) { + performRequestAsyncAndParseEntity( + request, RequestConverters::mtermVectors, options, MultiTermVectorsResponse::fromXContent, listener, emptySet()); + } + + /** * Executes a request using the Ranking Evaluation API. * See Ranking Evaluation API diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java index 68bb9b9a28b99..93d29056a707a 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityClient.java @@ -42,6 +42,8 @@ import org.elasticsearch.client.security.GetRoleMappingsResponse; import org.elasticsearch.client.security.GetSslCertificatesRequest; import org.elasticsearch.client.security.GetSslCertificatesResponse; +import org.elasticsearch.client.security.HasPrivilegesRequest; +import org.elasticsearch.client.security.HasPrivilegesResponse; import org.elasticsearch.client.security.InvalidateTokenRequest; import org.elasticsearch.client.security.InvalidateTokenResponse; import org.elasticsearch.client.security.PutRoleMappingRequest; @@ -244,6 +246,34 @@ public void authenticateAsync(RequestOptions options, ActionListener + * the docs for more. + * + * @param request the request with the privileges to check + * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return the response from the has privileges call + */ + public HasPrivilegesResponse hasPrivileges(HasPrivilegesRequest request, RequestOptions options) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, SecurityRequestConverters::hasPrivileges, options, + HasPrivilegesResponse::fromXContent, emptySet()); + } + + /** + * Asynchronously determine whether the current user has a specified list of privileges + * See + * the docs for more. + * + * @param request the request with the privileges to check + * @param options the request options (e.g. 
headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener the listener to be notified upon request completion + */ + public void hasPrivilegesAsync(HasPrivilegesRequest request, RequestOptions options, ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, SecurityRequestConverters::hasPrivileges, options, + HasPrivilegesResponse::fromXContent, listener, emptySet()); + } + /** * Clears the cache in one or more realms. * See diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityRequestConverters.java index 160aa1fd82b0a..216085af78a38 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SecurityRequestConverters.java @@ -30,6 +30,7 @@ import org.elasticsearch.client.security.DeletePrivilegesRequest; import org.elasticsearch.client.security.DeleteRoleMappingRequest; import org.elasticsearch.client.security.DeleteRoleRequest; +import org.elasticsearch.client.security.HasPrivilegesRequest; import org.elasticsearch.client.security.DisableUserRequest; import org.elasticsearch.client.security.EnableUserRequest; import org.elasticsearch.client.security.GetRoleMappingsRequest; @@ -114,6 +115,12 @@ private static Request setUserEnabled(SetUserEnabledRequest setUserEnabledReques return request; } + static Request hasPrivileges(HasPrivilegesRequest hasPrivilegesRequest) throws IOException { + Request request = new Request(HttpGet.METHOD_NAME, "/_xpack/security/user/_has_privileges"); + request.setEntity(createEntity(hasPrivilegesRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + static Request clearRealmCache(ClearRealmCacheRequest clearRealmCacheRequest) { RequestConverters.EndpointBuilder builder = new RequestConverters.EndpointBuilder() .addPathPartAsIs("_xpack/security/realm"); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/FollowConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/FollowConfig.java new file mode 100644 index 0000000000000..eb9b5e80767db --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/FollowConfig.java @@ -0,0 +1,203 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.ccr; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +public class FollowConfig { + + static final ParseField MAX_READ_REQUEST_OPERATION_COUNT = new ParseField("max_read_request_operation_count"); + static final ParseField MAX_READ_REQUEST_SIZE = new ParseField("max_read_request_size"); + static final ParseField MAX_OUTSTANDING_READ_REQUESTS = new ParseField("max_outstanding_read_requests"); + static final ParseField MAX_WRITE_REQUEST_OPERATION_COUNT = new ParseField("max_write_request_operation_count"); + static final ParseField MAX_WRITE_REQUEST_SIZE = new ParseField("max_write_request_size"); + static final ParseField MAX_OUTSTANDING_WRITE_REQUESTS = new ParseField("max_outstanding_write_requests"); + static final ParseField MAX_WRITE_BUFFER_COUNT = new ParseField("max_write_buffer_count"); + static final ParseField MAX_WRITE_BUFFER_SIZE = new ParseField("max_write_buffer_size"); + static final ParseField MAX_RETRY_DELAY_FIELD = new ParseField("max_retry_delay"); + static final ParseField READ_POLL_TIMEOUT = new ParseField("read_poll_timeout"); + + private Integer maxReadRequestOperationCount; + private Integer maxOutstandingReadRequests; + private ByteSizeValue maxReadRequestSize; + private Integer maxWriteRequestOperationCount; + private ByteSizeValue maxWriteRequestSize; + private Integer maxOutstandingWriteRequests; + private Integer maxWriteBufferCount; + private ByteSizeValue maxWriteBufferSize; + private TimeValue maxRetryDelay; + private TimeValue readPollTimeout; + + FollowConfig() { + } + + public Integer getMaxReadRequestOperationCount() { + return maxReadRequestOperationCount; + } + + public void setMaxReadRequestOperationCount(Integer maxReadRequestOperationCount) { + this.maxReadRequestOperationCount = maxReadRequestOperationCount; + } + + public Integer getMaxOutstandingReadRequests() { + return maxOutstandingReadRequests; + } + + public void setMaxOutstandingReadRequests(Integer maxOutstandingReadRequests) { + this.maxOutstandingReadRequests = maxOutstandingReadRequests; + } + + public ByteSizeValue getMaxReadRequestSize() { + return maxReadRequestSize; + } + + public void setMaxReadRequestSize(ByteSizeValue maxReadRequestSize) { + this.maxReadRequestSize = maxReadRequestSize; + } + + public Integer getMaxWriteRequestOperationCount() { + return maxWriteRequestOperationCount; + } + + public void setMaxWriteRequestOperationCount(Integer maxWriteRequestOperationCount) { + this.maxWriteRequestOperationCount = maxWriteRequestOperationCount; + } + + public ByteSizeValue getMaxWriteRequestSize() { + return maxWriteRequestSize; + } + + public void setMaxWriteRequestSize(ByteSizeValue maxWriteRequestSize) { + this.maxWriteRequestSize = maxWriteRequestSize; + } + + public Integer getMaxOutstandingWriteRequests() { + return maxOutstandingWriteRequests; + } + + public void setMaxOutstandingWriteRequests(Integer maxOutstandingWriteRequests) { + this.maxOutstandingWriteRequests = maxOutstandingWriteRequests; + } + + public Integer getMaxWriteBufferCount() { + return maxWriteBufferCount; + } + + public void setMaxWriteBufferCount(Integer maxWriteBufferCount) { + this.maxWriteBufferCount = maxWriteBufferCount; + } + + public ByteSizeValue getMaxWriteBufferSize() { + return 
maxWriteBufferSize; + } + + public void setMaxWriteBufferSize(ByteSizeValue maxWriteBufferSize) { + this.maxWriteBufferSize = maxWriteBufferSize; + } + + public TimeValue getMaxRetryDelay() { + return maxRetryDelay; + } + + public void setMaxRetryDelay(TimeValue maxRetryDelay) { + this.maxRetryDelay = maxRetryDelay; + } + + public TimeValue getReadPollTimeout() { + return readPollTimeout; + } + + public void setReadPollTimeout(TimeValue readPollTimeout) { + this.readPollTimeout = readPollTimeout; + } + + void toXContentFragment(XContentBuilder builder, ToXContent.Params params) throws IOException { + if (maxReadRequestOperationCount != null) { + builder.field(MAX_READ_REQUEST_OPERATION_COUNT.getPreferredName(), maxReadRequestOperationCount); + } + if (maxReadRequestSize != null) { + builder.field(MAX_READ_REQUEST_SIZE.getPreferredName(), maxReadRequestSize.getStringRep()); + } + if (maxWriteRequestOperationCount != null) { + builder.field(MAX_WRITE_REQUEST_OPERATION_COUNT.getPreferredName(), maxWriteRequestOperationCount); + } + if (maxWriteRequestSize != null) { + builder.field(MAX_WRITE_REQUEST_SIZE.getPreferredName(), maxWriteRequestSize.getStringRep()); + } + if (maxWriteBufferCount != null) { + builder.field(MAX_WRITE_BUFFER_COUNT.getPreferredName(), maxWriteBufferCount); + } + if (maxWriteBufferSize != null) { + builder.field(MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize.getStringRep()); + } + if (maxOutstandingReadRequests != null) { + builder.field(MAX_OUTSTANDING_READ_REQUESTS.getPreferredName(), maxOutstandingReadRequests); + } + if (maxOutstandingWriteRequests != null) { + builder.field(MAX_OUTSTANDING_WRITE_REQUESTS.getPreferredName(), maxOutstandingWriteRequests); + } + if (maxRetryDelay != null) { + builder.field(MAX_RETRY_DELAY_FIELD.getPreferredName(), maxRetryDelay.getStringRep()); + } + if (readPollTimeout != null) { + builder.field(READ_POLL_TIMEOUT.getPreferredName(), readPollTimeout.getStringRep()); + } + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + FollowConfig that = (FollowConfig) o; + return Objects.equals(maxReadRequestOperationCount, that.maxReadRequestOperationCount) && + Objects.equals(maxOutstandingReadRequests, that.maxOutstandingReadRequests) && + Objects.equals(maxReadRequestSize, that.maxReadRequestSize) && + Objects.equals(maxWriteRequestOperationCount, that.maxWriteRequestOperationCount) && + Objects.equals(maxWriteRequestSize, that.maxWriteRequestSize) && + Objects.equals(maxOutstandingWriteRequests, that.maxOutstandingWriteRequests) && + Objects.equals(maxWriteBufferCount, that.maxWriteBufferCount) && + Objects.equals(maxWriteBufferSize, that.maxWriteBufferSize) && + Objects.equals(maxRetryDelay, that.maxRetryDelay) && + Objects.equals(readPollTimeout, that.readPollTimeout); + } + + @Override + public int hashCode() { + return Objects.hash( + maxReadRequestOperationCount, + maxOutstandingReadRequests, + maxReadRequestSize, + maxWriteRequestOperationCount, + maxWriteRequestSize, + maxOutstandingWriteRequests, + maxWriteBufferCount, + maxWriteBufferSize, + maxRetryDelay, + readPollTimeout + ); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/PutFollowRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/PutFollowRequest.java index f3ea0ae2e9bfe..98e9d224564cf 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/PutFollowRequest.java +++ 
b/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/PutFollowRequest.java @@ -21,43 +21,21 @@ import org.elasticsearch.client.Validatable; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; import java.util.Objects; -public final class PutFollowRequest implements Validatable, ToXContentObject { +public final class PutFollowRequest extends FollowConfig implements Validatable, ToXContentObject { static final ParseField REMOTE_CLUSTER_FIELD = new ParseField("remote_cluster"); static final ParseField LEADER_INDEX_FIELD = new ParseField("leader_index"); static final ParseField FOLLOWER_INDEX_FIELD = new ParseField("follower_index"); - static final ParseField MAX_READ_REQUEST_OPERATION_COUNT = new ParseField("max_read_request_operation_count"); - static final ParseField MAX_READ_REQUEST_SIZE = new ParseField("max_read_request_size"); - static final ParseField MAX_OUTSTANDING_READ_REQUESTS = new ParseField("max_outstanding_read_requests"); - static final ParseField MAX_WRITE_REQUEST_OPERATION_COUNT = new ParseField("max_write_request_operation_count"); - static final ParseField MAX_WRITE_REQUEST_SIZE = new ParseField("max_write_request_size"); - static final ParseField MAX_OUTSTANDING_WRITE_REQUESTS = new ParseField("max_outstanding_write_requests"); - static final ParseField MAX_WRITE_BUFFER_COUNT = new ParseField("max_write_buffer_count"); - static final ParseField MAX_WRITE_BUFFER_SIZE = new ParseField("max_write_buffer_size"); - static final ParseField MAX_RETRY_DELAY_FIELD = new ParseField("max_retry_delay"); - static final ParseField READ_POLL_TIMEOUT = new ParseField("read_poll_timeout"); private final String remoteCluster; private final String leaderIndex; private final String followerIndex; - private Integer maxReadRequestOperationCount; - private Integer maxOutstandingReadRequests; - private ByteSizeValue maxReadRequestSize; - private Integer maxWriteRequestOperationCount; - private ByteSizeValue maxWriteRequestSize; - private Integer maxOutstandingWriteRequests; - private Integer maxWriteBufferCount; - private ByteSizeValue maxWriteBufferSize; - private TimeValue maxRetryDelay; - private TimeValue readPollTimeout; public PutFollowRequest(String remoteCluster, String leaderIndex, String followerIndex) { this.remoteCluster = Objects.requireNonNull(remoteCluster, "remoteCluster"); @@ -71,36 +49,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(REMOTE_CLUSTER_FIELD.getPreferredName(), remoteCluster); builder.field(LEADER_INDEX_FIELD.getPreferredName(), leaderIndex); builder.field(FOLLOWER_INDEX_FIELD.getPreferredName(), followerIndex); - if (maxReadRequestOperationCount != null) { - builder.field(MAX_READ_REQUEST_OPERATION_COUNT.getPreferredName(), maxReadRequestOperationCount); - } - if (maxReadRequestSize != null) { - builder.field(MAX_READ_REQUEST_SIZE.getPreferredName(), maxReadRequestSize.getStringRep()); - } - if (maxWriteRequestOperationCount != null) { - builder.field(MAX_WRITE_REQUEST_OPERATION_COUNT.getPreferredName(), maxWriteRequestOperationCount); - } - if (maxWriteRequestSize != null) { - builder.field(MAX_WRITE_REQUEST_SIZE.getPreferredName(), maxWriteRequestSize.getStringRep()); - } - if (maxWriteBufferCount != null) { - 
builder.field(MAX_WRITE_BUFFER_COUNT.getPreferredName(), maxWriteBufferCount); - } - if (maxWriteBufferSize != null) { - builder.field(MAX_WRITE_BUFFER_SIZE.getPreferredName(), maxWriteBufferSize.getStringRep()); - } - if (maxOutstandingReadRequests != null) { - builder.field(MAX_OUTSTANDING_READ_REQUESTS.getPreferredName(), maxOutstandingReadRequests); - } - if (maxOutstandingWriteRequests != null) { - builder.field(MAX_OUTSTANDING_WRITE_REQUESTS.getPreferredName(), maxOutstandingWriteRequests); - } - if (maxRetryDelay != null) { - builder.field(MAX_RETRY_DELAY_FIELD.getPreferredName(), maxRetryDelay.getStringRep()); - } - if (readPollTimeout != null) { - builder.field(READ_POLL_TIMEOUT.getPreferredName(), readPollTimeout.getStringRep()); - } + toXContentFragment(builder, params); builder.endObject(); return builder; } @@ -117,122 +66,24 @@ public String getFollowerIndex() { return followerIndex; } - public Integer getMaxReadRequestOperationCount() { - return maxReadRequestOperationCount; - } - - public void setMaxReadRequestOperationCount(Integer maxReadRequestOperationCount) { - this.maxReadRequestOperationCount = maxReadRequestOperationCount; - } - - public Integer getMaxOutstandingReadRequests() { - return maxOutstandingReadRequests; - } - - public void setMaxOutstandingReadRequests(Integer maxOutstandingReadRequests) { - this.maxOutstandingReadRequests = maxOutstandingReadRequests; - } - - public ByteSizeValue getMaxReadRequestSize() { - return maxReadRequestSize; - } - - public void setMaxReadRequestSize(ByteSizeValue maxReadRequestSize) { - this.maxReadRequestSize = maxReadRequestSize; - } - - public Integer getMaxWriteRequestOperationCount() { - return maxWriteRequestOperationCount; - } - - public void setMaxWriteRequestOperationCount(Integer maxWriteRequestOperationCount) { - this.maxWriteRequestOperationCount = maxWriteRequestOperationCount; - } - - public ByteSizeValue getMaxWriteRequestSize() { - return maxWriteRequestSize; - } - - public void setMaxWriteRequestSize(ByteSizeValue maxWriteRequestSize) { - this.maxWriteRequestSize = maxWriteRequestSize; - } - - public Integer getMaxOutstandingWriteRequests() { - return maxOutstandingWriteRequests; - } - - public void setMaxOutstandingWriteRequests(Integer maxOutstandingWriteRequests) { - this.maxOutstandingWriteRequests = maxOutstandingWriteRequests; - } - - public Integer getMaxWriteBufferCount() { - return maxWriteBufferCount; - } - - public void setMaxWriteBufferCount(Integer maxWriteBufferCount) { - this.maxWriteBufferCount = maxWriteBufferCount; - } - - public ByteSizeValue getMaxWriteBufferSize() { - return maxWriteBufferSize; - } - - public void setMaxWriteBufferSize(ByteSizeValue maxWriteBufferSize) { - this.maxWriteBufferSize = maxWriteBufferSize; - } - - public TimeValue getMaxRetryDelay() { - return maxRetryDelay; - } - - public void setMaxRetryDelay(TimeValue maxRetryDelay) { - this.maxRetryDelay = maxRetryDelay; - } - - public TimeValue getReadPollTimeout() { - return readPollTimeout; - } - - public void setReadPollTimeout(TimeValue readPollTimeout) { - this.readPollTimeout = readPollTimeout; - } - @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; PutFollowRequest that = (PutFollowRequest) o; return Objects.equals(remoteCluster, that.remoteCluster) && Objects.equals(leaderIndex, that.leaderIndex) && - Objects.equals(followerIndex, that.followerIndex) && - Objects.equals(maxReadRequestOperationCount, 
that.maxReadRequestOperationCount) && - Objects.equals(maxOutstandingReadRequests, that.maxOutstandingReadRequests) && - Objects.equals(maxReadRequestSize, that.maxReadRequestSize) && - Objects.equals(maxWriteRequestOperationCount, that.maxWriteRequestOperationCount) && - Objects.equals(maxWriteRequestSize, that.maxWriteRequestSize) && - Objects.equals(maxOutstandingWriteRequests, that.maxOutstandingWriteRequests) && - Objects.equals(maxWriteBufferCount, that.maxWriteBufferCount) && - Objects.equals(maxWriteBufferSize, that.maxWriteBufferSize) && - Objects.equals(maxRetryDelay, that.maxRetryDelay) && - Objects.equals(readPollTimeout, that.readPollTimeout); + Objects.equals(followerIndex, that.followerIndex); } @Override public int hashCode() { return Objects.hash( + super.hashCode(), remoteCluster, leaderIndex, - followerIndex, - maxReadRequestOperationCount, - maxOutstandingReadRequests, - maxReadRequestSize, - maxWriteRequestOperationCount, - maxWriteRequestSize, - maxOutstandingWriteRequests, - maxWriteBufferCount, - maxWriteBufferSize, - maxRetryDelay, - readPollTimeout + followerIndex ); } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/ResumeFollowRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/ResumeFollowRequest.java new file mode 100644 index 0000000000000..d9ceb666afd2f --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/ResumeFollowRequest.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.ccr; + +import org.elasticsearch.client.Validatable; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.client.ccr.PutFollowRequest.FOLLOWER_INDEX_FIELD; + +public final class ResumeFollowRequest extends FollowConfig implements Validatable, ToXContentObject { + + private final String followerIndex; + + public ResumeFollowRequest(String followerIndex) { + this.followerIndex = Objects.requireNonNull(followerIndex, "followerIndex"); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(FOLLOWER_INDEX_FIELD.getPreferredName(), followerIndex); + toXContentFragment(builder, params); + builder.endObject(); + return builder; + } + + public String getFollowerIndex() { + return followerIndex; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + ResumeFollowRequest that = (ResumeFollowRequest) o; + return Objects.equals(followerIndex, that.followerIndex); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), followerIndex); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/UnfollowRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/UnfollowRequest.java new file mode 100644 index 0000000000000..f3fb607006d90 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/UnfollowRequest.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.ccr; + +import org.elasticsearch.client.Validatable; + +import java.util.Objects; + +public final class UnfollowRequest implements Validatable { + + private final String followerIndex; + + public UnfollowRequest(String followerIndex) { + this.followerIndex = Objects.requireNonNull(followerIndex); + } + + public String getFollowerIndex() { + return followerIndex; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/MultiTermVectorsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/MultiTermVectorsRequest.java new file mode 100644 index 0000000000000..8ec5e79993cd8 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/MultiTermVectorsRequest.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.core; + +import org.elasticsearch.client.Validatable; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.client.core.TermVectorsRequest.createFromTemplate; + +public class MultiTermVectorsRequest implements ToXContentObject, Validatable { + + private List requests = new ArrayList<>(); + + /** + * Constructs an empty MultiTermVectorsRequest. + * After that, use the {@code add} method to add individual {@code TermVectorsRequest} objects to it. + */ + public MultiTermVectorsRequest() {} + + /** + * Constructs a MultiTermVectorsRequest from the given document ids + * and a template {@code TermVectorsRequest}. + * Used when individual requests share the same index, type and other settings. + * @param ids - ids of documents for which term vectors are requested + * @param template - a template {@code TermVectorsRequest} that allows setting all + * settings only once for all requests. + */ + public MultiTermVectorsRequest(String[] ids, TermVectorsRequest template) { + for (String id : ids) { + TermVectorsRequest request = createFromTemplate(template, id); + requests.add(request); + } + } + + /** + * Adds another {@code TermVectorsRequest} to this {@code MultiTermVectorsRequest} + * @param request - {@code TermVectorsRequest} to add + */ + public void add(TermVectorsRequest request) { + requests.add(request); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.startArray("docs"); + for (TermVectorsRequest request : requests) { + request.toXContent(builder, params); + } + builder.endArray(); + builder.endObject(); + return builder; + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/MultiTermVectorsResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/MultiTermVectorsResponse.java new file mode 100644 index 0000000000000..0a2974a8aa166 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/MultiTermVectorsResponse.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.core; + + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +public class MultiTermVectorsResponse { + private final List responses; + + public MultiTermVectorsResponse(List responses) { + this.responses = responses; + } + + private static ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("multi_term_vectors", true, + args -> { + // as the response comes from server, we are sure that args[0] will be a list of TermVectorsResponse + @SuppressWarnings("unchecked") List termVectorsResponsesList = (List) args[0]; + return new MultiTermVectorsResponse(termVectorsResponsesList); + } + ); + + static { + PARSER.declareObjectArray(constructorArg(), (p,c) -> TermVectorsResponse.fromXContent(p), new ParseField("docs")); + } + + public static MultiTermVectorsResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + /** + * Returns the list of {@code TermVectorsResponse} for this {@code MultiTermVectorsResponse} + */ + public List getTermVectorsResponses() { + return responses; + } + + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!(obj instanceof MultiTermVectorsResponse)) return false; + MultiTermVectorsResponse other = (MultiTermVectorsResponse) obj; + return Objects.equals(responses, other.responses); + } + + @Override + public int hashCode() { + return Objects.hash(responses); + } + +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/TermVectorsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/TermVectorsRequest.java index 579ab52185198..30e97d3fc9ca8 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/TermVectorsRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/TermVectorsRequest.java @@ -72,6 +72,29 @@ public TermVectorsRequest(String index, String type, XContentBuilder docBuilder) this.docBuilder = docBuilder; } + + /** + * Constructs a new TermVectorRequest from a template + * using the provided document id + * @param template - a term vector request served as a template + * @param id - id of the requested document + */ + static TermVectorsRequest createFromTemplate(TermVectorsRequest template, String id) { + TermVectorsRequest request = new TermVectorsRequest(template.getIndex(), template.getType(), id); + request.realtime = template.getRealtime(); + request.requestPositions = template.requestPositions; + request.requestPayloads = template.requestPayloads; + request.requestOffsets = template.requestOffsets; + request.requestFieldStatistics = template.requestFieldStatistics; + request.requestTermStatistics = template.requestTermStatistics; + if (template.routing != null) request.setRouting(template.getRouting()); + if (template.preference != null) 
request.setPreference(template.getPreference()); + if (template.fields != null) request.setFields(template.getFields()); + if (template.perFieldAnalyzer != null) request.setPerFieldAnalyzer(template.perFieldAnalyzer); + if (template.filterSettings != null) request.setFilterSettings(template.filterSettings); + return request; + } + /** * Returns the index of the request */ @@ -147,13 +170,6 @@ public void setPerFieldAnalyzer(Map perFieldAnalyzer) { this.perFieldAnalyzer = perFieldAnalyzer; } - /** - * Sets an artifical document on what to request _termvectors - */ - public void setDoc(XContentBuilder docBuilder) { - this.docBuilder = docBuilder; - } - /** * Sets conditions for terms filtering */ @@ -201,6 +217,9 @@ public boolean getRealtime() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); + builder.field("_index", index); + builder.field("_type", type); + if (id != null) builder.field("_id", id); // set values only when different from defaults if (requestPositions == false) builder.field("positions", false); if (requestPayloads == false) builder.field("payloads", false); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/DeleteCalendarJobRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/DeleteCalendarJobRequest.java new file mode 100644 index 0000000000000..a1bd3a412c722 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/DeleteCalendarJobRequest.java @@ -0,0 +1,88 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; + +import java.security.InvalidParameterException; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +/** + * Request class for removing Machine Learning Jobs from an existing calendar + */ +public class DeleteCalendarJobRequest extends ActionRequest { + + private final List jobIds; + private final String calendarId; + + /** + * Create a new request referencing an existing Calendar and which JobIds to remove + * from it. + * + * @param calendarId The non-null ID of the calendar + * @param jobIds JobIds to remove from the calendar, cannot be empty, or contain null values. + * It can be a list of jobs or groups. + */ + public DeleteCalendarJobRequest(String calendarId, String... 
jobIds) { + this.calendarId = Objects.requireNonNull(calendarId, "[calendar_id] must not be null."); + if (jobIds.length == 0) { + throw new InvalidParameterException("jobIds must not be empty."); + } + if (Arrays.stream(jobIds).anyMatch(Objects::isNull)) { + throw new NullPointerException("jobIds must not contain null values."); + } + this.jobIds = Arrays.asList(jobIds); + } + + public List getJobIds() { + return jobIds; + } + + public String getCalendarId() { + return calendarId; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public int hashCode() { + return Objects.hash(jobIds, calendarId); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + DeleteCalendarJobRequest that = (DeleteCalendarJobRequest) other; + return Objects.equals(jobIds, that.jobIds) && + Objects.equals(calendarId, that.calendarId); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCalendarEventsRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCalendarEventsRequest.java new file mode 100644 index 0000000000000..5730e132df1a1 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCalendarEventsRequest.java @@ -0,0 +1,169 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.calendars.Calendar; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.client.ml.job.util.PageParams; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * Get the Scheduled Events for a Calendar + */ +public class GetCalendarEventsRequest extends ActionRequest implements ToXContentObject { + + public static final ParseField START = new ParseField("start"); + public static final ParseField END = new ParseField("end"); + + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("get_calendar_events_request", a -> new GetCalendarEventsRequest((String)a[0])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), Calendar.ID); + PARSER.declareString(GetCalendarEventsRequest::setStart, START); + PARSER.declareString(GetCalendarEventsRequest::setEnd, END); + PARSER.declareString(GetCalendarEventsRequest::setJobId, Job.ID); + PARSER.declareObject(GetCalendarEventsRequest::setPageParams, PageParams.PARSER, PageParams.PAGE); + } + + private final String calendarId; + private String start; + private String end; + private String jobId; + private PageParams pageParams; + + /** + * Create a new request to get the ScheduledEvents for the given calendarId. + * + * @param calendarId The ID of the calendar. + * Can be `_all` to get ALL ScheduledEvents for all calendars. + */ + public GetCalendarEventsRequest(String calendarId) { + this.calendarId = Objects.requireNonNull(calendarId, "[calendar_id] must not be null."); + } + + public String getCalendarId() { + return calendarId; + } + + public PageParams getPageParams() { + return pageParams; + } + + /** + * The paging parameters for the gathered ScheduledEvents + * @param pageParams The desired paging params + */ + public void setPageParams(PageParams pageParams) { + this.pageParams = pageParams; + } + + public String getStart() { + return start; + } + + /** + * Specifies to get events with timestamps after this time. + * + * @param start String representation of a timestamp; may be an epoch seconds, epoch millis or an ISO string + */ + public void setStart(String start) { + this.start = start; + } + + public String getEnd() { + return end; + } + + /** + * Specifies to get events with timestamps earlier than this time. + * + * @param end String representation of a timestamp; may be an epoch seconds, epoch millis or an ISO string + */ + public void setEnd(String end) { + this.end = end; + } + + public String getJobId() { + return jobId; + } + + /** + * The jobId for which to get the ScheduledEvents. When this option is used calendarId must be `_all` + * @param jobId The job for which to get the events. 
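+     * For example, to get the events of a single job across all calendars (the job id below is
+     * illustrative):
+     *   GetCalendarEventsRequest request = new GetCalendarEventsRequest("_all");
+     *   request.setJobId("my-job");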
+ */ + public void setJobId(String jobId) { + this.jobId = jobId; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Calendar.ID.getPreferredName(), calendarId); + if (start != null) { + builder.field(START.getPreferredName(), start); + } + if (end != null) { + builder.field(END.getPreferredName(), end); + } + if (jobId != null) { + builder.field(Job.ID.getPreferredName(), jobId); + } + if (pageParams != null) { + builder.field(PageParams.PAGE.getPreferredName(), pageParams); + } + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(calendarId, start, end, jobId, pageParams); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + GetCalendarEventsRequest other = (GetCalendarEventsRequest) obj; + return Objects.equals(calendarId, other.calendarId) + && Objects.equals(pageParams, other.pageParams) + && Objects.equals(start, other.start) + && Objects.equals(end, other.end) + && Objects.equals(jobId, other.jobId); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCalendarEventsResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCalendarEventsResponse.java new file mode 100644 index 0000000000000..8ddd874681465 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/GetCalendarEventsResponse.java @@ -0,0 +1,88 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.calendars.ScheduledEvent; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * Contains a {@link List} of the found {@link ScheduledEvent} objects and the total count found + */ +public class GetCalendarEventsResponse extends AbstractResultResponse { + + public static final ParseField RESULTS_FIELD = new ParseField("events"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("calendar_events_response", true, + a -> new GetCalendarEventsResponse((List) a[0], (long) a[1])); + + static { + PARSER.declareObjectArray(constructorArg(), ScheduledEvent.PARSER, RESULTS_FIELD); + PARSER.declareLong(constructorArg(), COUNT); + } + + GetCalendarEventsResponse(List events, long count) { + super(RESULTS_FIELD, events, count); + } + + /** + * The collection of {@link ScheduledEvent} objects found in the query + */ + public List events() { + return results; + } + + public static GetCalendarEventsResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public int hashCode() { + return Objects.hash(results, count); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + + GetCalendarEventsResponse other = (GetCalendarEventsResponse) obj; + return Objects.equals(results, other.results) && count == other.count; + } + + @Override + public final String toString() { + return Strings.toString(this); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PostCalendarEventRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PostCalendarEventRequest.java new file mode 100644 index 0000000000000..2c43ec9ab7769 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PostCalendarEventRequest.java @@ -0,0 +1,113 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.calendars.Calendar; +import org.elasticsearch.client.ml.calendars.ScheduledEvent; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +/** + * Request to add a ScheduledEvent to a Machine Learning calendar + */ +public class PostCalendarEventRequest extends ActionRequest implements ToXContentObject { + + private final String calendarId; + private final List scheduledEvents; + + public static final String INCLUDE_CALENDAR_ID_KEY = "include_calendar_id"; + public static final ParseField EVENTS = new ParseField("events"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("post_calendar_event_request", + a -> new PostCalendarEventRequest((String)a[0], (List)a[1])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), Calendar.ID); + PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), + (p, c) -> ScheduledEvent.PARSER.apply(p, null), EVENTS); + } + public static final MapParams EXCLUDE_CALENDAR_ID_PARAMS = + new MapParams(Collections.singletonMap(INCLUDE_CALENDAR_ID_KEY, Boolean.toString(false))); + + /** + * Create a new PostCalendarEventRequest with an existing non-null calendarId and a list of Scheduled events + * + * @param calendarId The ID of the calendar, must be non-null + * @param scheduledEvents The non-null, non-empty, list of {@link ScheduledEvent} objects to add to the calendar + */ + public PostCalendarEventRequest(String calendarId, List scheduledEvents) { + this.calendarId = Objects.requireNonNull(calendarId, "[calendar_id] must not be null."); + this.scheduledEvents = Objects.requireNonNull(scheduledEvents, "[events] must not be null."); + if (scheduledEvents.isEmpty()) { + throw new IllegalArgumentException("At least 1 event is required"); + } + } + + public String getCalendarId() { + return calendarId; + } + + public List getScheduledEvents() { + return scheduledEvents; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (params.paramAsBoolean(INCLUDE_CALENDAR_ID_KEY, true)) { + builder.field(Calendar.ID.getPreferredName(), calendarId); + } + builder.field(EVENTS.getPreferredName(), scheduledEvents); + builder.endObject(); + return builder; + } + + @Override + public int hashCode() { + return Objects.hash(calendarId, scheduledEvents); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + PostCalendarEventRequest other = (PostCalendarEventRequest) obj; + return Objects.equals(calendarId, other.calendarId) && Objects.equals(scheduledEvents, other.scheduledEvents); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PostCalendarEventResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PostCalendarEventResponse.java new file mode 
100644 index 0000000000000..56e3cdce24f1b --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PostCalendarEventResponse.java @@ -0,0 +1,93 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.ml.calendars.ScheduledEvent; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +/** + * Response to adding ScheduledEvent(s) to a Machine Learning calendar + */ +public class PostCalendarEventResponse extends ActionResponse implements ToXContentObject { + + private final List scheduledEvents; + public static final ParseField EVENTS = new ParseField("events"); + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("post_calendar_event_response", + true, + a -> new PostCalendarEventResponse((List)a[0])); + + static { + PARSER.declareObjectArray(ConstructingObjectParser.constructorArg(), + (p, c) -> ScheduledEvent.PARSER.apply(p, null), EVENTS); + } + + public static PostCalendarEventResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + /** + * Create a new PostCalendarEventResponse containing the scheduled Events + * + * @param scheduledEvents The list of {@link ScheduledEvent} objects + */ + public PostCalendarEventResponse(List scheduledEvents) { + this.scheduledEvents = scheduledEvents; + } + + public List getScheduledEvents() { + return scheduledEvents; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(EVENTS.getPreferredName(), scheduledEvents); + builder.endObject(); + return builder; + } + + @Override + public int hashCode(){ + return Objects.hash(scheduledEvents); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || getClass() != obj.getClass()) { + return false; + } + PostCalendarEventResponse other = (PostCalendarEventResponse) obj; + return Objects.equals(scheduledEvents, other.scheduledEvents); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutCalendarJobRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutCalendarJobRequest.java new file mode 100644 index 0000000000000..5ec3798689345 --- /dev/null +++ 
b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/PutCalendarJobRequest.java @@ -0,0 +1,88 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; + +import java.security.InvalidParameterException; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +/** + * Request class for adding Machine Learning Jobs to an existing calendar + */ +public class PutCalendarJobRequest extends ActionRequest { + + private final List jobIds; + private final String calendarId; + + /** + * Create a new request referencing an existing Calendar and which JobIds to add + * to it. + * + * @param calendarId The non-null ID of the calendar + * @param jobIds JobIds to add to the calendar, cannot be empty, or contain null values. + * It can be a list of jobs or groups. + */ + public PutCalendarJobRequest(String calendarId, String... jobIds) { + this.calendarId = Objects.requireNonNull(calendarId, "[calendar_id] must not be null."); + if (jobIds.length == 0) { + throw new InvalidParameterException("jobIds must not be empty."); + } + if (Arrays.stream(jobIds).anyMatch(Objects::isNull)) { + throw new NullPointerException("jobIds must not contain null values."); + } + this.jobIds = Arrays.asList(jobIds); + } + + public List getJobIds() { + return jobIds; + } + + public String getCalendarId() { + return calendarId; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public int hashCode() { + return Objects.hash(jobIds, calendarId); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + PutCalendarJobRequest that = (PutCalendarJobRequest) other; + return Objects.equals(jobIds, that.jobIds) && + Objects.equals(calendarId, that.calendarId); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/RevertModelSnapshotRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/RevertModelSnapshotRequest.java new file mode 100644 index 0000000000000..3a38cd86a0565 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/RevertModelSnapshotRequest.java @@ -0,0 +1,120 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.client.ml.job.process.ModelSnapshot; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * A request to revert to a specific model snapshot for a given job + */ +public class RevertModelSnapshotRequest extends ActionRequest implements ToXContentObject { + + + public static final ParseField DELETE_INTERVENING = new ParseField("delete_intervening_results"); + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "revert_model_snapshots_request", a -> new RevertModelSnapshotRequest((String) a[0], (String) a[1])); + + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID); + PARSER.declareString(ConstructingObjectParser.constructorArg(), ModelSnapshot.SNAPSHOT_ID); + PARSER.declareBoolean(RevertModelSnapshotRequest::setDeleteInterveningResults, DELETE_INTERVENING); + } + + private final String jobId; + private final String snapshotId; + private Boolean deleteInterveningResults; + + /** + * Constructs a request to revert to a given model snapshot + * @param jobId id of the job for which to revert the model snapshot + * @param snapshotId id of the snapshot to which to revert + */ + public RevertModelSnapshotRequest(String jobId, String snapshotId) { + this.jobId = Objects.requireNonNull(jobId, "[" + Job.ID + "] must not be null"); + this.snapshotId = Objects.requireNonNull(snapshotId, "[" + ModelSnapshot.SNAPSHOT_ID + "] must not be null"); + } + + public String getJobId() { + return jobId; + } + + public String getSnapshotId() { + return snapshotId; + } + + public Boolean getDeleteInterveningResults() { + return deleteInterveningResults; + } + + /** + * Sets the request flag that indicates whether or not intervening results should be deleted. + * @param deleteInterveningResults Flag that indicates whether or not intervening results should be deleted. 
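+     * For example (the job id and snapshot id below are illustrative):
+     *   RevertModelSnapshotRequest request = new RevertModelSnapshotRequest("my-job", "my-snapshot");
+     *   request.setDeleteInterveningResults(true);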
+ */ + public void setDeleteInterveningResults(Boolean deleteInterveningResults) { + this.deleteInterveningResults = deleteInterveningResults; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + builder.field(ModelSnapshot.SNAPSHOT_ID.getPreferredName(), snapshotId); + if (deleteInterveningResults != null) { + builder.field(DELETE_INTERVENING.getPreferredName(), deleteInterveningResults); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + RevertModelSnapshotRequest request = (RevertModelSnapshotRequest) obj; + return Objects.equals(jobId, request.jobId) + && Objects.equals(snapshotId, request.snapshotId) + && Objects.equals(deleteInterveningResults, request.deleteInterveningResults); + } + + @Override + public int hashCode() { + return Objects.hash(jobId, snapshotId, deleteInterveningResults); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/RevertModelSnapshotResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/RevertModelSnapshotResponse.java new file mode 100644 index 0000000000000..575f38043b3c8 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/RevertModelSnapshotResponse.java @@ -0,0 +1,92 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.ml.job.process.ModelSnapshot; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; + +import java.util.Objects; + +/** + * A response containing the reverted model snapshot + */ +public class RevertModelSnapshotResponse extends ActionResponse implements ToXContentObject { + + private static final ParseField MODEL = new ParseField("model"); + + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("revert_model_snapshot_response", true, + a -> new RevertModelSnapshotResponse((ModelSnapshot.Builder) a[0])); + + static { + PARSER.declareObject(ConstructingObjectParser.constructorArg(), ModelSnapshot.PARSER, MODEL); + } + + public static RevertModelSnapshotResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + public RevertModelSnapshotResponse(ModelSnapshot.Builder modelSnapshot) { + this.model = modelSnapshot.build(); + } + + private final ModelSnapshot model; + + /** + * Get full information about the reverted model snapshot + * @return the reverted model snapshot. + */ + public ModelSnapshot getModel() { + return model; + } + + @Override + public int hashCode() { + return Objects.hash(model); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + RevertModelSnapshotResponse other = (RevertModelSnapshotResponse) obj; + return Objects.equals(model, other.model); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + if (model != null) { + builder.field(MODEL.getPreferredName(), model); + } + builder.endObject(); + return builder; + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/UpdateModelSnapshotRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/UpdateModelSnapshotRequest.java new file mode 100644 index 0000000000000..b2b6417ab2edb --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/UpdateModelSnapshotRequest.java @@ -0,0 +1,135 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.client.ml.job.process.ModelSnapshot; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * A request to update information about an existing model snapshot for a given job + */ +public class UpdateModelSnapshotRequest extends ActionRequest implements ToXContentObject { + + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "update_model_snapshot_request", a -> new UpdateModelSnapshotRequest((String) a[0], (String) a[1])); + + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), Job.ID); + PARSER.declareString(ConstructingObjectParser.constructorArg(), ModelSnapshot.SNAPSHOT_ID); + PARSER.declareStringOrNull(UpdateModelSnapshotRequest::setDescription, ModelSnapshot.DESCRIPTION); + PARSER.declareBoolean(UpdateModelSnapshotRequest::setRetain, ModelSnapshot.RETAIN); + } + + private final String jobId; + private String snapshotId; + private String description; + private Boolean retain; + + /** + * Constructs a request to update information for a snapshot of given job + * @param jobId id of the job from which to retrieve results + * @param snapshotId id of the snapshot from which to retrieve results + */ + public UpdateModelSnapshotRequest(String jobId, String snapshotId) { + this.jobId = Objects.requireNonNull(jobId, "[" + Job.ID + "] must not be null"); + this.snapshotId = Objects.requireNonNull(snapshotId, "[" + ModelSnapshot.SNAPSHOT_ID + "] must not be null"); + } + + public String getJobId() { + return jobId; + } + + public String getSnapshotId() { + return snapshotId; + } + + public String getDescription() { + return description; + } + + /** + * The new description of the snapshot. 
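+     * For example (the ids and description below are illustrative):
+     *   UpdateModelSnapshotRequest request = new UpdateModelSnapshotRequest("my-job", "my-snapshot");
+     *   request.setDescription("Snapshot taken before the weekend data load");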
+ * @param description the updated snapshot description + */ + public void setDescription(String description) { + this.description = description; + } + + public Boolean getRetain() { + return retain; + } + + /** + * The new value of the "retain" property of the snapshot + * @param retain the updated retain property + */ + public void setRetain(boolean retain) { + this.retain = retain; + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Job.ID.getPreferredName(), jobId); + builder.field(ModelSnapshot.SNAPSHOT_ID.getPreferredName(), snapshotId); + if (description != null) { + builder.field(ModelSnapshot.DESCRIPTION.getPreferredName(), description); + } + if (retain != null) { + builder.field(ModelSnapshot.RETAIN.getPreferredName(), retain); + } + builder.endObject(); + return builder; + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + UpdateModelSnapshotRequest request = (UpdateModelSnapshotRequest) obj; + return Objects.equals(jobId, request.jobId) + && Objects.equals(snapshotId, request.snapshotId) + && Objects.equals(description, request.description) + && Objects.equals(retain, request.retain); + } + + @Override + public int hashCode() { + return Objects.hash(jobId, snapshotId, description, retain); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/UpdateModelSnapshotResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/UpdateModelSnapshotResponse.java new file mode 100644 index 0000000000000..049a24c02d10a --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/UpdateModelSnapshotResponse.java @@ -0,0 +1,109 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.ml.job.process.ModelSnapshot; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Objects; + +/** + * A response acknowledging the update of information for an existing model snapshot for a given job + */ +public class UpdateModelSnapshotResponse extends ActionResponse implements ToXContentObject { + + private static final ParseField ACKNOWLEDGED = new ParseField("acknowledged"); + private static final ParseField MODEL = new ParseField("model"); + + public UpdateModelSnapshotResponse(boolean acknowledged, ModelSnapshot.Builder modelSnapshot) { + this.acknowledged = acknowledged; + this.model = modelSnapshot.build(); + } + + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("update_model_snapshot_response", true, + a -> new UpdateModelSnapshotResponse((Boolean) a[0], ((ModelSnapshot.Builder) a[1]))); + + static { + PARSER.declareBoolean(ConstructingObjectParser.constructorArg(), ACKNOWLEDGED); + PARSER.declareObject(ConstructingObjectParser.constructorArg(), ModelSnapshot.PARSER, MODEL); + } + + public static UpdateModelSnapshotResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + private final Boolean acknowledged; + private final ModelSnapshot model; + + /** + * Get the action acknowledgement + * @return a {@code boolean} that indicates whether the model snapshot was updated successfully. + */ + public Boolean getAcknowledged() { + return acknowledged; + } + + /** + * Get the updated snapshot of the model + * @return the updated model snapshot. 
+ */ + public ModelSnapshot getModel() { + return model; + } + + @Override + public int hashCode() { + return Objects.hash(acknowledged, model); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + if (acknowledged != null) { + builder.field(ACKNOWLEDGED.getPreferredName(), acknowledged); + } + if (model != null) { + builder.field(MODEL.getPreferredName(), model); + } + builder.endObject(); + return builder; + } + + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + UpdateModelSnapshotResponse request = (UpdateModelSnapshotResponse) obj; + return Objects.equals(acknowledged, request.acknowledged) + && Objects.equals(model, request.model); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/AnalysisConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/AnalysisConfig.java index 9b759599dda3c..f3049be2b5e5b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/AnalysisConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/config/AnalysisConfig.java @@ -60,8 +60,6 @@ public class AnalysisConfig implements ToXContentObject { public static final ParseField SUMMARY_COUNT_FIELD_NAME = new ParseField("summary_count_field_name"); public static final ParseField DETECTORS = new ParseField("detectors"); public static final ParseField INFLUENCERS = new ParseField("influencers"); - public static final ParseField OVERLAPPING_BUCKETS = new ParseField("overlapping_buckets"); - public static final ParseField RESULT_FINALIZATION_WINDOW = new ParseField("result_finalization_window"); public static final ParseField MULTIVARIATE_BY_FIELDS = new ParseField("multivariate_by_fields"); @SuppressWarnings("unchecked") @@ -84,8 +82,6 @@ public class AnalysisConfig implements ToXContentObject { builder.setLatency(TimeValue.parseTimeValue(val, LATENCY.getPreferredName())), LATENCY); PARSER.declareString(Builder::setSummaryCountFieldName, SUMMARY_COUNT_FIELD_NAME); PARSER.declareStringArray(Builder::setInfluencers, INFLUENCERS); - PARSER.declareBoolean(Builder::setOverlappingBuckets, OVERLAPPING_BUCKETS); - PARSER.declareLong(Builder::setResultFinalizationWindow, RESULT_FINALIZATION_WINDOW); PARSER.declareBoolean(Builder::setMultivariateByFields, MULTIVARIATE_BY_FIELDS); } @@ -100,14 +96,11 @@ public class AnalysisConfig implements ToXContentObject { private final String summaryCountFieldName; private final List detectors; private final List influencers; - private final Boolean overlappingBuckets; - private final Long resultFinalizationWindow; private final Boolean multivariateByFields; private AnalysisConfig(TimeValue bucketSpan, String categorizationFieldName, List categorizationFilters, CategorizationAnalyzerConfig categorizationAnalyzerConfig, TimeValue latency, String summaryCountFieldName, - List detectors, List influencers, Boolean overlappingBuckets, Long resultFinalizationWindow, - Boolean multivariateByFields) { + List detectors, List influencers, Boolean multivariateByFields) { this.detectors = Collections.unmodifiableList(detectors); this.bucketSpan = bucketSpan; this.latency = latency; @@ -116,8 +109,6 @@ private AnalysisConfig(TimeValue bucketSpan, String categorizationFieldName, Lis this.categorizationFilters = categorizationFilters == null ? 
null : Collections.unmodifiableList(categorizationFilters); this.summaryCountFieldName = summaryCountFieldName; this.influencers = Collections.unmodifiableList(influencers); - this.overlappingBuckets = overlappingBuckets; - this.resultFinalizationWindow = resultFinalizationWindow; this.multivariateByFields = multivariateByFields; } @@ -177,14 +168,6 @@ public List getInfluencers() { return influencers; } - public Boolean getOverlappingBuckets() { - return overlappingBuckets; - } - - public Long getResultFinalizationWindow() { - return resultFinalizationWindow; - } - public Boolean getMultivariateByFields() { return multivariateByFields; } @@ -255,12 +238,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } builder.endArray(); builder.field(INFLUENCERS.getPreferredName(), influencers); - if (overlappingBuckets != null) { - builder.field(OVERLAPPING_BUCKETS.getPreferredName(), overlappingBuckets); - } - if (resultFinalizationWindow != null) { - builder.field(RESULT_FINALIZATION_WINDOW.getPreferredName(), resultFinalizationWindow); - } if (multivariateByFields != null) { builder.field(MULTIVARIATE_BY_FIELDS.getPreferredName(), multivariateByFields); } @@ -287,8 +264,6 @@ public boolean equals(Object object) { Objects.equals(summaryCountFieldName, that.summaryCountFieldName) && Objects.equals(detectors, that.detectors) && Objects.equals(influencers, that.influencers) && - Objects.equals(overlappingBuckets, that.overlappingBuckets) && - Objects.equals(resultFinalizationWindow, that.resultFinalizationWindow) && Objects.equals(multivariateByFields, that.multivariateByFields); } @@ -296,8 +271,7 @@ public boolean equals(Object object) { public int hashCode() { return Objects.hash( bucketSpan, categorizationFieldName, categorizationFilters, categorizationAnalyzerConfig, latency, - summaryCountFieldName, detectors, influencers, overlappingBuckets, resultFinalizationWindow, - multivariateByFields); + summaryCountFieldName, detectors, influencers, multivariateByFields); } public static Builder builder(List detectors) { @@ -314,8 +288,6 @@ public static class Builder { private CategorizationAnalyzerConfig categorizationAnalyzerConfig; private String summaryCountFieldName; private List influencers = new ArrayList<>(); - private Boolean overlappingBuckets; - private Long resultFinalizationWindow; private Boolean multivariateByFields; public Builder(List detectors) { @@ -332,8 +304,6 @@ public Builder(AnalysisConfig analysisConfig) { this.categorizationAnalyzerConfig = analysisConfig.categorizationAnalyzerConfig; this.summaryCountFieldName = analysisConfig.summaryCountFieldName; this.influencers = new ArrayList<>(analysisConfig.influencers); - this.overlappingBuckets = analysisConfig.overlappingBuckets; - this.resultFinalizationWindow = analysisConfig.resultFinalizationWindow; this.multivariateByFields = analysisConfig.multivariateByFields; } @@ -391,16 +361,6 @@ public Builder setInfluencers(List influencers) { return this; } - public Builder setOverlappingBuckets(Boolean overlappingBuckets) { - this.overlappingBuckets = overlappingBuckets; - return this; - } - - public Builder setResultFinalizationWindow(Long resultFinalizationWindow) { - this.resultFinalizationWindow = resultFinalizationWindow; - return this; - } - public Builder setMultivariateByFields(Boolean multivariateByFields) { this.multivariateByFields = multivariateByFields; return this; @@ -409,8 +369,7 @@ public Builder setMultivariateByFields(Boolean multivariateByFields) { public AnalysisConfig build() { 
return new AnalysisConfig(bucketSpan, categorizationFieldName, categorizationFilters, categorizationAnalyzerConfig, - latency, summaryCountFieldName, detectors, influencers, overlappingBuckets, - resultFinalizationWindow, multivariateByFields); + latency, summaryCountFieldName, detectors, influencers, multivariateByFields); } } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSnapshot.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSnapshot.java index 6c52f7c9bdd8a..a303f4014724c 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSnapshot.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ml/job/process/ModelSnapshot.java @@ -161,6 +161,10 @@ public Quantiles getQuantiles() { return quantiles; } + public boolean getRetain() { + return retain; + } + public Date getLatestRecordTimeStamp() { return latestRecordTimeStamp; } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/HasPrivilegesRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/HasPrivilegesRequest.java new file mode 100644 index 0000000000000..0e47c81d6eaa0 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/HasPrivilegesRequest.java @@ -0,0 +1,96 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.client.Validatable; +import org.elasticsearch.client.security.user.privileges.ApplicationResourcePrivileges; +import org.elasticsearch.client.security.user.privileges.IndicesPrivileges; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; +import java.util.Set; + +import static java.util.Collections.emptySet; +import static java.util.Collections.unmodifiableSet; + +/** + * Request to determine whether the current user has a list of privileges. + */ +public final class HasPrivilegesRequest implements Validatable, ToXContentObject { + + private final Set clusterPrivileges; + private final Set indexPrivileges; + private final Set applicationPrivileges; + + public HasPrivilegesRequest(@Nullable Set clusterPrivileges, + @Nullable Set indexPrivileges, + @Nullable Set applicationPrivileges) { + this.clusterPrivileges = clusterPrivileges == null ? emptySet() : unmodifiableSet(clusterPrivileges); + this.indexPrivileges = indexPrivileges == null ? emptySet() : unmodifiableSet(indexPrivileges); + this.applicationPrivileges = applicationPrivileges == null ? 
emptySet() : unmodifiableSet(applicationPrivileges); + + if (this.clusterPrivileges.isEmpty() && this.indexPrivileges.isEmpty() && this.applicationPrivileges.isEmpty()) { + throw new IllegalArgumentException("At last 1 privilege must be specified"); + } + } + + public Set getClusterPrivileges() { + return clusterPrivileges; + } + + public Set getIndexPrivileges() { + return indexPrivileges; + } + + public Set getApplicationPrivileges() { + return applicationPrivileges; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field("cluster", clusterPrivileges) + .field("index", indexPrivileges) + .field("application", applicationPrivileges) + .endObject(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final HasPrivilegesRequest that = (HasPrivilegesRequest) o; + return Objects.equals(clusterPrivileges, that.clusterPrivileges) && + Objects.equals(indexPrivileges, that.indexPrivileges) && + Objects.equals(applicationPrivileges, that.applicationPrivileges); + } + + @Override + public int hashCode() { + return Objects.hash(clusterPrivileges, indexPrivileges, applicationPrivileges); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/HasPrivilegesResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/HasPrivilegesResponse.java new file mode 100644 index 0000000000000..41ba3a4bcb038 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/HasPrivilegesResponse.java @@ -0,0 +1,252 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.function.BiConsumer; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; + +/** + * Response when checking whether the current user has a defined set of privileges. 
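+ * <p>
+ * A minimal usage sketch. The {@code client.security().hasPrivileges(...)} call, the
+ * {@code IndicesPrivileges} builder usage and the privilege names below are assumptions for
+ * illustration only and are not defined in this file:
+ * <pre>{@code
+ * HasPrivilegesRequest request = new HasPrivilegesRequest(
+ *     Collections.singleton("monitor"),
+ *     Collections.singleton(IndicesPrivileges.builder().indices("logs-*").privileges("read").build()),
+ *     null);
+ * HasPrivilegesResponse response = client.security().hasPrivileges(request, RequestOptions.DEFAULT);
+ * boolean canMonitor = response.hasClusterPrivilege("monitor");
+ * boolean canRead = response.hasIndexPrivilege("logs-*", "read");
+ * }</pre>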
+ */ +public final class HasPrivilegesResponse { + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "has_privileges_response", true, args -> new HasPrivilegesResponse( + (String) args[0], (Boolean) args[1], checkMap(args[2], 0), checkMap(args[3], 1), checkMap(args[4], 2))); + + static { + PARSER.declareString(constructorArg(), new ParseField("username")); + PARSER.declareBoolean(constructorArg(), new ParseField("has_all_requested")); + declareMap(constructorArg(), "cluster"); + declareMap(constructorArg(), "index"); + declareMap(constructorArg(), "application"); + } + + @SuppressWarnings("unchecked") + private static Map checkMap(Object argument, int depth) { + if (argument instanceof Map) { + Map map = (Map) argument; + if (depth == 0) { + map.values().stream() + .filter(val -> (val instanceof Boolean) == false) + .forEach(val -> { + throw new IllegalArgumentException("Map value [" + val + "] in [" + map + "] is not a Boolean"); + }); + } else { + map.values().stream().forEach(val -> checkMap(val, depth - 1)); + } + return map; + } + throw new IllegalArgumentException("Value [" + argument + "] is not an Object"); + } + + private static void declareMap(BiConsumer> arg, String name) { + PARSER.declareField(arg, XContentParser::map, new ParseField(name), ObjectParser.ValueType.OBJECT); + } + + private final String username; + private final boolean hasAllRequested; + private final Map clusterPrivileges; + private final Map> indexPrivileges; + private final Map>> applicationPrivileges; + + public HasPrivilegesResponse(String username, boolean hasAllRequested, + Map clusterPrivileges, + Map> indexPrivileges, + Map>> applicationPrivileges) { + this.username = username; + this.hasAllRequested = hasAllRequested; + this.clusterPrivileges = Collections.unmodifiableMap(clusterPrivileges); + this.indexPrivileges = unmodifiableMap2(indexPrivileges); + this.applicationPrivileges = unmodifiableMap3(applicationPrivileges); + } + + private static Map> unmodifiableMap2(final Map> map) { + final Map> copy = new HashMap<>(map); + copy.replaceAll((k, v) -> Collections.unmodifiableMap(v)); + return Collections.unmodifiableMap(copy); + } + + private static Map>> unmodifiableMap3( + final Map>> map) { + final Map>> copy = new HashMap<>(map); + copy.replaceAll((k, v) -> unmodifiableMap2(v)); + return Collections.unmodifiableMap(copy); + } + + public static HasPrivilegesResponse fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + /** + * The username (principal) of the user for which the privileges check was executed. + */ + public String getUsername() { + return username; + } + + /** + * {@code true} if the user has every privilege that was checked. Otherwise {@code false}. + */ + public boolean hasAllRequested() { + return hasAllRequested; + } + + /** + * @param clusterPrivilegeName The name of a cluster privilege. This privilege must have been specified (verbatim) in the + * {@link HasPrivilegesRequest#getClusterPrivileges() cluster privileges of the request}. + * @return {@code true} if the user has the specified cluster privilege. {@code false} if the privilege was checked + * but it has not been granted to the user. + * @throws IllegalArgumentException if the response did not include a value for the specified privilege name. + * The response only includes values for privileges that were + * {@link HasPrivilegesRequest#getClusterPrivileges() included in the request}. 
+ */ + public boolean hasClusterPrivilege(String clusterPrivilegeName) throws IllegalArgumentException { + Boolean has = clusterPrivileges.get(clusterPrivilegeName); + if (has == null) { + throw new IllegalArgumentException("Cluster privilege [" + clusterPrivilegeName + "] was not included in this response"); + } + return has; + } + + /** + * @param indexName The name of the index to check. This index must have been specified (verbatim) in the + * {@link HasPrivilegesRequest#getIndexPrivileges() requested index privileges}. + * @param privilegeName The name of the index privilege to check. This privilege must have been specified (verbatim), for the + * given index, in the {@link HasPrivilegesRequest#getIndexPrivileges() requested index privileges}. + * @return {@code true} if the user has the specified privilege on the specified index. {@code false} if the privilege was checked + * for that index and was not granted to the user. + * @throws IllegalArgumentException if the response did not include a value for the specified index and privilege name pair. + * The response only includes values for indices and privileges that were + * {@link HasPrivilegesRequest#getIndexPrivileges() included in the request}. + */ + public boolean hasIndexPrivilege(String indexName, String privilegeName) { + Map indexPrivileges = this.indexPrivileges.get(indexName); + if (indexPrivileges == null) { + throw new IllegalArgumentException("No privileges for index [" + indexName + "] were included in this response"); + } + Boolean has = indexPrivileges.get(privilegeName); + if (has == null) { + throw new IllegalArgumentException("Privilege [" + privilegeName + "] was not included in the response for index [" + + indexName + "]"); + } + return has; + } + + /** + * @param applicationName The name of the application to check. This application must have been specified (verbatim) in the + * {@link HasPrivilegesRequest#getApplicationPrivileges() requested application privileges}. + * @param resourceName The name of the resource to check. This resource must have been specified (verbatim), for the given + * application in the {@link HasPrivilegesRequest#getApplicationPrivileges() requested application privileges}. + * @param privilegeName The name of the privilege to check. This privilege must have been specified (verbatim), for the given + * application and resource, in the + * {@link HasPrivilegesRequest#getApplicationPrivileges() requested application privileges}. + * @return {@code true} if the user has the specified privilege on the specified resource in the specified application. + * {@code false} if the privilege was checked for that application and resource, but was not granted to the user. + * @throws IllegalArgumentException if the response did not include a value for the specified application, resource and privilege + * triplet. The response only includes values for applications, resources and privileges that were + * {@link HasPrivilegesRequest#getApplicationPrivileges() included in the request}. 
+ */ + public boolean hasApplicationPrivilege(String applicationName, String resourceName, String privilegeName) { + final Map> appPrivileges = this.applicationPrivileges.get(applicationName); + if (appPrivileges == null) { + throw new IllegalArgumentException("No privileges for application [" + applicationName + "] were included in this response"); + } + final Map resourcePrivileges = appPrivileges.get(resourceName); + if (resourcePrivileges == null) { + throw new IllegalArgumentException("No privileges for resource [" + resourceName + + "] were included in the response for application [" + applicationName + "]"); + } + Boolean has = resourcePrivileges.get(privilegeName); + if (has == null) { + throw new IllegalArgumentException("Privilege [" + privilegeName + "] was not included in the response for application [" + + applicationName + "] and resource [" + resourceName + "]"); + } + return has; + } + + /** + * A {@code Map} from cluster-privilege-name to access. Each requested privilege is included as a key in the map, and the + * associated value indicates whether the user was granted that privilege. + *
+ * The {@link #hasClusterPrivilege} method should be used in preference to directly accessing this map. + *
+ */ + public Map getClusterPrivileges() { + return clusterPrivileges; + } + + /** + * A {@code Map} from index-name + privilege-name to access. Each requested index is a key in the outer map. + * Each requested privilege is a key in the inner map. The inner most {@code Boolean} value indicates whether + * the user was granted that privilege on that index. + *
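+ * For illustration only (the index and privilege names are hypothetical), a user who may read but not write
+ * {@code logs-*} would be represented here as {@code {logs-*={read=true, write=false}}}.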
+ * The {@link #hasIndexPrivilege} method should be used in preference to directly accessing this map. + *
+ */ + public Map> getIndexPrivileges() { + return indexPrivileges; + } + + /** + * A {@code Map} from application-name + resource-name + privilege-name to access. Each requested application is a key in the + * outer-most map. Each requested resource is a key in the next-level map. The requested privileges form the keys in the inner-most map. + * The {@code Boolean} value indicates whether the user was granted that privilege on that resource within that application. + *
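+ * For illustration only (the application, resource and privilege names are hypothetical), a user who may read
+ * but not write the resource {@code data/1} of application {@code myapp} would be represented here as
+ * {@code {myapp={data/1={read=true, write=false}}}}.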
+ * The {@link #hasApplicationPrivilege} method should be used in preference to directly accessing this map. + *
+ */ + public Map>> getApplicationPrivileges() { + return applicationPrivileges; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || this.getClass() != o.getClass()) { + return false; + } + final HasPrivilegesResponse that = (HasPrivilegesResponse) o; + return this.hasAllRequested == that.hasAllRequested && + Objects.equals(this.username, that.username) && + Objects.equals(this.clusterPrivileges, that.clusterPrivileges) && + Objects.equals(this.indexPrivileges, that.indexPrivileges) && + Objects.equals(this.applicationPrivileges, that.applicationPrivileges); + } + + @Override + public int hashCode() { + return Objects.hash(username, hasAllRequested, clusterPrivileges, indexPrivileges, applicationPrivileges); + } +} + diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java index fdd5634ddd6bd..95b2fc0a43bb5 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java @@ -28,14 +28,18 @@ import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.action.get.MultiGetResponse; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.search.SearchHit; +import org.hamcrest.Matcher; +import org.hamcrest.Matchers; +import java.io.IOException; import java.util.Arrays; import java.util.HashSet; import java.util.List; @@ -44,10 +48,19 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; - +import java.util.stream.IntStream; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.fieldFromSource; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasIndex; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasProperty; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasType; import static org.hamcrest.Matchers.both; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.either; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.everyItem; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; @@ -268,23 +281,124 @@ public void testBulkProcessorConcurrentRequestsReadOnlyIndex() throws Exception assertMultiGetResponse(highLevelClient().mget(multiGetRequest, RequestOptions.DEFAULT), testDocs); } - private static MultiGetRequest indexDocs(BulkProcessor processor, int numDocs) throws Exception { + @SuppressWarnings("unchecked") + public void testGlobalParametersAndSingleRequest() throws Exception { + createIndexWithMultipleShards("test"); + + final CountDownLatch latch = new 
CountDownLatch(1); + BulkProcessorTestListener listener = new BulkProcessorTestListener(latch); + createFieldAddingPipleine("pipeline_id", "fieldNameXYZ", "valueXYZ"); + + // tag::bulk-processor-mix-parameters + try (BulkProcessor processor = initBulkProcessorBuilder(listener) + .setGlobalIndex("tweets") + .setGlobalType("_doc") + .setGlobalRouting("routing") + .setGlobalPipeline("pipeline_id") + .build()) { + + + processor.add(new IndexRequest() // <1> + .source(XContentType.JSON, "user", "some user")); + processor.add(new IndexRequest("blogs", "post_type", "1") // <2> + .source(XContentType.JSON, "title", "some title")); + } + // end::bulk-processor-mix-parameters + latch.await(); + + Iterable hits = searchAll(new SearchRequest("tweets").routing("routing")); + assertThat(hits, everyItem(hasProperty(fieldFromSource("user"), equalTo("some user")))); + assertThat(hits, everyItem(hasProperty(fieldFromSource("fieldNameXYZ"), equalTo("valueXYZ")))); + + + Iterable blogs = searchAll(new SearchRequest("blogs").routing("routing")); + assertThat(blogs, everyItem(hasProperty(fieldFromSource("title"), equalTo("some title")))); + assertThat(blogs, everyItem(hasProperty(fieldFromSource("fieldNameXYZ"), equalTo("valueXYZ")))); + } + + @SuppressWarnings("unchecked") + public void testGlobalParametersAndBulkProcessor() throws Exception { + createIndexWithMultipleShards("test"); + + final CountDownLatch latch = new CountDownLatch(1); + BulkProcessorTestListener listener = new BulkProcessorTestListener(latch); + createFieldAddingPipleine("pipeline_id", "fieldNameXYZ", "valueXYZ"); + + int numDocs = randomIntBetween(10, 10); + try (BulkProcessor processor = initBulkProcessorBuilder(listener) + //let's make sure that the bulk action limit trips, one single execution will index all the documents + .setConcurrentRequests(randomIntBetween(0, 1)).setBulkActions(numDocs) + .setFlushInterval(TimeValue.timeValueHours(24)).setBulkSize(new ByteSizeValue(1, ByteSizeUnit.GB)) + .setGlobalIndex("test") + .setGlobalType("test") + .setGlobalRouting("routing") + .setGlobalPipeline("pipeline_id") + .build()) { + + indexDocs(processor, numDocs, null, null, "test", "test", "pipeline_id"); + latch.await(); + + assertThat(listener.beforeCounts.get(), equalTo(1)); + assertThat(listener.afterCounts.get(), equalTo(1)); + assertThat(listener.bulkFailures.size(), equalTo(0)); + assertResponseItems(listener.bulkItems, numDocs); + + Iterable hits = searchAll(new SearchRequest("test").routing("routing")); + + assertThat(hits, everyItem(hasProperty(fieldFromSource("fieldNameXYZ"), equalTo("valueXYZ")))); + assertThat(hits, everyItem(Matchers.allOf(hasIndex("test"), hasType("test")))); + assertThat(hits, containsInAnyOrder(expectedIds(numDocs))); + } + } + + @SuppressWarnings("unchecked") + private Matcher[] expectedIds(int numDocs) { + return IntStream.rangeClosed(1, numDocs) + .boxed() + .map(n -> hasId(n.toString())) + .>toArray(Matcher[]::new); + } + + private static MultiGetRequest indexDocs(BulkProcessor processor, int numDocs, String localIndex, String localType, + String globalIndex, String globalType, String globalPipeline) throws Exception { MultiGetRequest multiGetRequest = new MultiGetRequest(); for (int i = 1; i <= numDocs; i++) { if (randomBoolean()) { - processor.add(new IndexRequest("test", "test", Integer.toString(i)) - .source(XContentType.JSON, "field", randomRealisticUnicodeOfLengthBetween(1, 30))); + processor.add(new IndexRequest(localIndex, localType, Integer.toString(i)) + .source(XContentType.JSON, "field", 
randomRealisticUnicodeOfLengthBetween(1, 30))); } else { - final String source = "{ \"index\":{\"_index\":\"test\",\"_type\":\"test\",\"_id\":\"" + Integer.toString(i) + "\"} }\n" - + Strings.toString(JsonXContent.contentBuilder() - .startObject().field("field", randomRealisticUnicodeOfLengthBetween(1, 30)).endObject()) + "\n"; - processor.add(new BytesArray(source), null, null, XContentType.JSON); + BytesArray data = bytesBulkRequest(localIndex, localType, i); + processor.add(data, globalIndex, globalType, globalPipeline, null, XContentType.JSON); } - multiGetRequest.add("test", "test", Integer.toString(i)); + multiGetRequest.add(localIndex, localType, Integer.toString(i)); } return multiGetRequest; } + private static BytesArray bytesBulkRequest(String localIndex, String localType, int id) throws IOException { + String action = Strings.toString(jsonBuilder() + .startObject() + .startObject("index") + .field("_index", localIndex) + .field("_type", localType) + .field("_id", Integer.toString(id)) + .endObject() + .endObject() + ); + String source = Strings.toString(jsonBuilder() + .startObject() + .field("field", randomRealisticUnicodeOfLengthBetween(1, 30)) + .endObject() + ); + + String request = action + "\n" + source + "\n"; + return new BytesArray(request); + } + + private static MultiGetRequest indexDocs(BulkProcessor processor, int numDocs) throws Exception { + return indexDocs(processor, numDocs, "test", "test", null, null, null); + } + private static void assertResponseItems(List bulkItemResponses, int numDocs) { assertThat(bulkItemResponses.size(), is(numDocs)); int i = 1; @@ -343,4 +457,5 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure) } } + } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkRequestWithGlobalParametersIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkRequestWithGlobalParametersIT.java new file mode 100644 index 0000000000000..cf8f1ebfdbd76 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkRequestWithGlobalParametersIT.java @@ -0,0 +1,217 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.search.SearchHit; + +import java.io.IOException; +import java.util.function.Function; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasIndex; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasProperty; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasType; +import static org.hamcrest.Matchers.both; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.emptyIterable; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.everyItem; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.nullValue; + +public class BulkRequestWithGlobalParametersIT extends ESRestHighLevelClientTestCase { + + @SuppressWarnings("unchecked") + public void testGlobalPipelineOnBulkRequest() throws IOException { + createFieldAddingPipleine("xyz", "fieldNameXYZ", "valueXYZ"); + + BulkRequest request = new BulkRequest(); + request.add(new IndexRequest("test", "doc", "1") + .source(XContentType.JSON, "field", "bulk1")); + request.add(new IndexRequest("test", "doc", "2") + .source(XContentType.JSON, "field", "bulk2")); + request.pipeline("xyz"); + + bulk(request); + + Iterable hits = searchAll("test"); + assertThat(hits, containsInAnyOrder(hasId("1"), hasId("2"))); + assertThat(hits, everyItem(hasProperty(fieldFromSource("fieldNameXYZ"), equalTo("valueXYZ")))); + } + + public void testPipelineOnRequestOverridesGlobalPipeline() throws IOException { + createFieldAddingPipleine("globalId", "fieldXYZ", "valueXYZ"); + createFieldAddingPipleine("perIndexId", "someNewField", "someValue"); + + BulkRequest request = new BulkRequest(); + request.pipeline("globalId"); + request.add(new IndexRequest("test", "doc", "1") + .source(XContentType.JSON, "field", "bulk1") + .setPipeline("perIndexId")); + request.add(new IndexRequest("test", "doc", "2") + .source(XContentType.JSON, "field", "bulk2") + .setPipeline("perIndexId")); + + bulk(request); + + Iterable hits = searchAll("test"); + assertThat(hits, everyItem(hasProperty(fieldFromSource("someNewField"), equalTo("someValue")))); + // global pipeline was not applied + assertThat(hits, everyItem(hasProperty(fieldFromSource("fieldXYZ"), nullValue()))); + } + + @SuppressWarnings("unchecked") + public void testMixPipelineOnRequestAndGlobal() throws IOException { + createFieldAddingPipleine("globalId", "fieldXYZ", "valueXYZ"); + createFieldAddingPipleine("perIndexId", "someNewField", "someValue"); + + // tag::bulk-request-mix-pipeline + BulkRequest request = new BulkRequest(); + request.pipeline("globalId"); + + request.add(new IndexRequest("test", "doc", "1") + .source(XContentType.JSON, "field", "bulk1") + .setPipeline("perIndexId")); // <1> + + request.add(new IndexRequest("test", "doc", "2") + .source(XContentType.JSON, "field", "bulk2")); // <2> + // end::bulk-request-mix-pipeline + bulk(request); + + Iterable hits = searchAll("test"); + assertThat(hits, containsInAnyOrder( + both(hasId("1")) + .and(hasProperty(fieldFromSource("someNewField"), equalTo("someValue"))), + both(hasId("2")) + 
.and(hasProperty(fieldFromSource("fieldXYZ"), equalTo("valueXYZ"))))); + } + + public void testGlobalIndex() throws IOException { + BulkRequest request = new BulkRequest("global_index", null); + request.add(new IndexRequest().type("doc").id("1") + .source(XContentType.JSON, "field", "bulk1")); + request.add(new IndexRequest().type("doc").id("2") + .source(XContentType.JSON, "field", "bulk2")); + + bulk(request); + + Iterable hits = searchAll("global_index"); + assertThat(hits, everyItem(hasIndex("global_index"))); + } + + @SuppressWarnings("unchecked") + public void testIndexGlobalAndPerRequest() throws IOException { + BulkRequest request = new BulkRequest("global_index", null); + request.add(new IndexRequest("local_index", "doc", "1") + .source(XContentType.JSON, "field", "bulk1")); + request.add(new IndexRequest().type("doc").id("2") // will take global index + .source(XContentType.JSON, "field", "bulk2")); + + bulk(request); + + Iterable hits = searchAll("local_index", "global_index"); + assertThat(hits, containsInAnyOrder( + both(hasId("1")) + .and(hasIndex("local_index")), + both(hasId("2")) + .and(hasIndex("global_index")))); + } + + public void testGlobalType() throws IOException { + BulkRequest request = new BulkRequest(null, "global_type"); + request.add(new IndexRequest("index").id("1") + .source(XContentType.JSON, "field", "bulk1")); + request.add(new IndexRequest("index").id("2") + .source(XContentType.JSON, "field", "bulk2")); + + bulk(request); + + Iterable hits = searchAll("index"); + assertThat(hits, everyItem(hasType("global_type"))); + } + + @SuppressWarnings("unchecked") + public void testTypeGlobalAndPerRequest() throws IOException { + BulkRequest request = new BulkRequest(null, "global_type"); + request.add(new IndexRequest("index1", "local_type", "1") + .source(XContentType.JSON, "field", "bulk1")); + request.add(new IndexRequest("index2").id("2") // will take global type + .source(XContentType.JSON, "field", "bulk2")); + + bulk(request); + + Iterable hits = searchAll("index1", "index2"); + assertThat(hits, containsInAnyOrder( + both(hasId("1")) + .and(hasType("local_type")), + both(hasId("2")) + .and(hasType("global_type")))); + } + + @SuppressWarnings("unchecked") + public void testGlobalRouting() throws IOException { + createIndexWithMultipleShards("index"); + BulkRequest request = new BulkRequest(null, null); + request.add(new IndexRequest("index", "type", "1") + .source(XContentType.JSON, "field", "bulk1")); + request.add(new IndexRequest("index", "type", "2") + .source(XContentType.JSON, "field", "bulk1")); + request.routing("1"); + bulk(request); + + Iterable emptyHits = searchAll(new SearchRequest("index").routing("xxx")); + assertThat(emptyHits, is(emptyIterable())); + + Iterable hits = searchAll(new SearchRequest("index").routing("1")); + assertThat(hits, containsInAnyOrder(hasId("1"), hasId("2"))); + } + + @SuppressWarnings("unchecked") + public void testMixLocalAndGlobalRouting() throws IOException { + BulkRequest request = new BulkRequest(null, null); + request.routing("globalRouting"); + request.add(new IndexRequest("index", "type", "1") + .source(XContentType.JSON, "field", "bulk1")); + request.add(new IndexRequest("index", "type", "2") + .routing("localRouting") + .source(XContentType.JSON, "field", "bulk1")); + + bulk(request); + + Iterable hits = searchAll(new SearchRequest("index").routing("globalRouting", "localRouting")); + assertThat(hits, containsInAnyOrder(hasId("1"), hasId("2"))); + } + + private BulkResponse bulk(BulkRequest request) throws 
IOException { + BulkResponse bulkResponse = execute(request, highLevelClient()::bulk, highLevelClient()::bulkAsync); + assertFalse(bulkResponse.hasFailures()); + return bulkResponse; + } + + @SuppressWarnings("unchecked") + private static Function fieldFromSource(String fieldName) { + return (response) -> (T) response.getSourceAsMap().get(fieldName); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CCRIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CCRIT.java index f98cbbb2b85c0..0aa4b68263a30 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CCRIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CCRIT.java @@ -22,6 +22,7 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.index.IndexRequest; @@ -31,6 +32,8 @@ import org.elasticsearch.client.ccr.PauseFollowRequest; import org.elasticsearch.client.ccr.PutFollowRequest; import org.elasticsearch.client.ccr.PutFollowResponse; +import org.elasticsearch.client.ccr.ResumeFollowRequest; +import org.elasticsearch.client.ccr.UnfollowRequest; import org.elasticsearch.client.core.AcknowledgedResponse; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; @@ -63,7 +66,7 @@ public void setupRemoteClusterConfig() throws IOException { assertThat(updateSettingsResponse.isAcknowledged(), is(true)); } - public void testCCR() throws Exception { + public void testIndexFollowing() throws Exception { CcrClient ccrClient = highLevelClient().ccr(); CreateIndexRequest createIndexRequest = new CreateIndexRequest("leader"); @@ -95,6 +98,33 @@ public void testCCR() throws Exception { PauseFollowRequest pauseFollowRequest = new PauseFollowRequest("follower"); AcknowledgedResponse pauseFollowResponse = execute(pauseFollowRequest, ccrClient::pauseFollow, ccrClient::pauseFollowAsync); assertThat(pauseFollowResponse.isAcknowledged(), is(true)); + + highLevelClient().index(indexRequest, RequestOptions.DEFAULT); + + ResumeFollowRequest resumeFollowRequest = new ResumeFollowRequest("follower"); + AcknowledgedResponse resumeFollowResponse = execute(resumeFollowRequest, ccrClient::resumeFollow, ccrClient::resumeFollowAsync); + assertThat(resumeFollowResponse.isAcknowledged(), is(true)); + + assertBusy(() -> { + SearchRequest followerSearchRequest = new SearchRequest("follower"); + SearchResponse followerSearchResponse = highLevelClient().search(followerSearchRequest, RequestOptions.DEFAULT); + assertThat(followerSearchResponse.getHits().getTotalHits(), equalTo(2L)); + }); + + // Need to pause prior to unfollowing it: + pauseFollowRequest = new PauseFollowRequest("follower"); + pauseFollowResponse = execute(pauseFollowRequest, ccrClient::pauseFollow, ccrClient::pauseFollowAsync); + assertThat(pauseFollowResponse.isAcknowledged(), is(true)); + + // Need to close index prior to unfollowing it: + CloseIndexRequest closeIndexRequest = new CloseIndexRequest("follower"); + org.elasticsearch.action.support.master.AcknowledgedResponse closeIndexReponse = + highLevelClient().indices().close(closeIndexRequest, 
RequestOptions.DEFAULT); + assertThat(closeIndexReponse.isAcknowledged(), is(true)); + + UnfollowRequest unfollowRequest = new UnfollowRequest("follower"); + AcknowledgedResponse unfollowResponse = execute(unfollowRequest, ccrClient::unfollow, ccrClient::unfollowAsync); + assertThat(unfollowResponse.isAcknowledged(), is(true)); } private static Map toMap(Response response) throws IOException { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java index 2b9dfd672bb96..211b2831907e9 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java @@ -44,6 +44,8 @@ import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; +import org.elasticsearch.client.core.MultiTermVectorsRequest; +import org.elasticsearch.client.core.MultiTermVectorsResponse; import org.elasticsearch.client.core.TermVectorsRequest; import org.elasticsearch.client.core.TermVectorsResponse; import org.elasticsearch.common.Strings; @@ -73,6 +75,7 @@ import org.joda.time.format.DateTimeFormat; import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Map; @@ -1250,4 +1253,69 @@ public void testTermvectorsWithNonExistentIndex() { () -> execute(request, highLevelClient()::termvectors, highLevelClient()::termvectorsAsync)); assertEquals(RestStatus.NOT_FOUND, exception.status()); } + + // Not entirely sure if _mtermvectors belongs to CRUD, and in the absence of a better place, will have it here + public void testMultiTermvectors() throws IOException { + final String sourceIndex = "index1"; + { + // prepare : index docs + Settings settings = Settings.builder() + .put("number_of_shards", 1) + .put("number_of_replicas", 0) + .build(); + String mappings = "\"_doc\":{\"properties\":{\"field\":{\"type\":\"text\"}}}"; + createIndex(sourceIndex, settings, mappings); + assertEquals( + RestStatus.OK, + highLevelClient().bulk( + new BulkRequest() + .add(new IndexRequest(sourceIndex, "_doc", "1") + .source(Collections.singletonMap("field", "value1"), XContentType.JSON)) + .add(new IndexRequest(sourceIndex, "_doc", "2") + .source(Collections.singletonMap("field", "value2"), XContentType.JSON)) + .setRefreshPolicy(RefreshPolicy.IMMEDIATE), + RequestOptions.DEFAULT + ).status() + ); + } + { + // test _mtermvectors where MultiTermVectorsRequest is constructed with ids and a template + String[] expectedIds = {"1", "2"}; + TermVectorsRequest tvRequestTemplate = new TermVectorsRequest(sourceIndex, "_doc", "fake_id"); + tvRequestTemplate.setFields("field"); + MultiTermVectorsRequest mtvRequest = new MultiTermVectorsRequest(expectedIds, tvRequestTemplate); + + MultiTermVectorsResponse mtvResponse = + execute(mtvRequest, highLevelClient()::mtermvectors, highLevelClient()::mtermvectorsAsync); + + List ids = new ArrayList<>(); + for (TermVectorsResponse tvResponse: mtvResponse.getTermVectorsResponses()) { + assertThat(tvResponse.getIndex(), equalTo(sourceIndex)); + assertTrue(tvResponse.getFound()); + ids.add(tvResponse.getId()); + } + assertArrayEquals(expectedIds, ids.toArray()); + } + + { + // test _mtermvectors where MultiTermVectorsRequest constructed with adding each separate request + MultiTermVectorsRequest mtvRequest = new MultiTermVectorsRequest(); + 
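+            // The first request below targets an existing document by id; the second computes term vectors
+            // for an artificial document supplied as an XContentBuilder rather than an indexed document.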
TermVectorsRequest tvRequest1 = new TermVectorsRequest(sourceIndex, "_doc", "1"); + tvRequest1.setFields("field"); + mtvRequest.add(tvRequest1); + + XContentBuilder docBuilder = XContentFactory.jsonBuilder(); + docBuilder.startObject().field("field", "valuex").endObject(); + TermVectorsRequest tvRequest2 = new TermVectorsRequest(sourceIndex, "_doc", docBuilder); + mtvRequest.add(tvRequest2); + + MultiTermVectorsResponse mtvResponse = + execute(mtvRequest, highLevelClient()::mtermvectors, highLevelClient()::mtermvectorsAsync); + for (TermVectorsResponse tvResponse: mtvResponse.getTermVectorsResponses()) { + assertThat(tvResponse.getIndex(), equalTo(sourceIndex)); + assertTrue(tvResponse.getFound()); + } + } + + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java index 083529ae214ef..b05e5c44980c1 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java @@ -22,7 +22,10 @@ import org.apache.http.Header; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; @@ -31,15 +34,20 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ingest.Pipeline; +import org.elasticsearch.search.SearchHit; import org.elasticsearch.test.rest.ESRestTestCase; import org.junit.AfterClass; import org.junit.Before; import java.io.IOException; import java.nio.charset.StandardCharsets; +import java.util.Arrays; import java.util.Base64; import java.util.Collections; import java.util.Objects; +import java.util.stream.Collectors; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; public abstract class ESRestHighLevelClientTestCase extends ESRestTestCase { @@ -189,6 +197,22 @@ protected static XContentBuilder buildRandomXContentPipeline() throws IOExceptio return buildRandomXContentPipeline(pipelineBuilder); } + protected static void createFieldAddingPipleine(String id, String fieldName, String value) throws IOException { + XContentBuilder pipeline = jsonBuilder() + .startObject() + .startArray("processors") + .startObject() + .startObject("set") + .field("field", fieldName) + .field("value", value) + .endObject() + .endObject() + .endArray() + .endObject(); + + createPipeline(new PutPipelineRequest(id, BytesReference.bytes(pipeline), XContentType.JSON)); + } + protected static void createPipeline(String pipelineId) throws IOException { XContentBuilder builder = buildRandomXContentPipeline(); createPipeline(new PutPipelineRequest(pipelineId, BytesReference.bytes(builder), builder.contentType())); @@ -218,4 +242,32 @@ protected Settings restClientSettings() { .put(ThreadContext.PREFIX + ".Authorization", token) .build(); } + + protected Iterable searchAll(String... 
indices) throws IOException { + SearchRequest searchRequest = new SearchRequest(indices); + return searchAll(searchRequest); + } + + protected Iterable searchAll(SearchRequest searchRequest) throws IOException { + refreshIndexes(searchRequest.indices()); + SearchResponse search = highLevelClient().search(searchRequest, RequestOptions.DEFAULT); + return search.getHits(); + } + + protected void refreshIndexes(String... indices) throws IOException { + String joinedIndices = Arrays.stream(indices) + .collect(Collectors.joining(",")); + Response refreshResponse = client().performRequest(new Request("POST", "/" + joinedIndices + "/_refresh")); + assertEquals(200, refreshResponse.getStatusLine().getStatusCode()); + } + + protected void createIndexWithMultipleShards(String index) throws IOException { + CreateIndexRequest indexRequest = new CreateIndexRequest(index); + int shards = randomIntBetween(8,10); + indexRequest.settings(Settings.builder() + .put("index.number_of_shards", shards) + .put("index.number_of_replicas", 0) + ); + highLevelClient().indices().create(indexRequest, RequestOptions.DEFAULT); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleIT.java index 9ed19e9afe734..a916353355b77 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndexLifecycleIT.java @@ -272,8 +272,8 @@ public void testRetryLifecycleStep() throws IOException { RetryLifecyclePolicyRequest retryRequest = new RetryLifecyclePolicyRequest("retry"); ElasticsearchStatusException ex = expectThrows(ElasticsearchStatusException.class, () -> execute( - retryRequest, highLevelClient().indexLifecycle()::retryLifecycleStep, - highLevelClient().indexLifecycle()::retryLifecycleStepAsync + retryRequest, highLevelClient().indexLifecycle()::retryLifecyclePolicy, + highLevelClient().indexLifecycle()::retryLifecyclePolicyAsync ) ); assertEquals(400, ex.status().getStatus()); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java index 0381fe88bb49a..8bbad5a059ac3 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MLRequestConvertersTests.java @@ -24,6 +24,7 @@ import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; import org.elasticsearch.client.ml.CloseJobRequest; +import org.elasticsearch.client.ml.DeleteCalendarJobRequest; import org.elasticsearch.client.ml.DeleteCalendarRequest; import org.elasticsearch.client.ml.DeleteDatafeedRequest; import org.elasticsearch.client.ml.DeleteFilterRequest; @@ -33,6 +34,7 @@ import org.elasticsearch.client.ml.FlushJobRequest; import org.elasticsearch.client.ml.ForecastJobRequest; import org.elasticsearch.client.ml.GetBucketsRequest; +import org.elasticsearch.client.ml.GetCalendarEventsRequest; import org.elasticsearch.client.ml.GetCalendarsRequest; import org.elasticsearch.client.ml.GetCategoriesRequest; import org.elasticsearch.client.ml.GetDatafeedRequest; @@ -45,19 +47,25 @@ import org.elasticsearch.client.ml.GetOverallBucketsRequest; import org.elasticsearch.client.ml.GetRecordsRequest; import org.elasticsearch.client.ml.OpenJobRequest; +import 
org.elasticsearch.client.ml.PostCalendarEventRequest; import org.elasticsearch.client.ml.PostDataRequest; import org.elasticsearch.client.ml.PreviewDatafeedRequest; +import org.elasticsearch.client.ml.PutCalendarJobRequest; import org.elasticsearch.client.ml.PutCalendarRequest; import org.elasticsearch.client.ml.PutDatafeedRequest; import org.elasticsearch.client.ml.PutFilterRequest; import org.elasticsearch.client.ml.PutJobRequest; +import org.elasticsearch.client.ml.RevertModelSnapshotRequest; import org.elasticsearch.client.ml.StartDatafeedRequest; import org.elasticsearch.client.ml.StartDatafeedRequestTests; import org.elasticsearch.client.ml.StopDatafeedRequest; import org.elasticsearch.client.ml.UpdateFilterRequest; import org.elasticsearch.client.ml.UpdateJobRequest; +import org.elasticsearch.client.ml.UpdateModelSnapshotRequest; import org.elasticsearch.client.ml.calendars.Calendar; import org.elasticsearch.client.ml.calendars.CalendarTests; +import org.elasticsearch.client.ml.calendars.ScheduledEvent; +import org.elasticsearch.client.ml.calendars.ScheduledEventTests; import org.elasticsearch.client.ml.datafeed.DatafeedConfig; import org.elasticsearch.client.ml.datafeed.DatafeedConfigTests; import org.elasticsearch.client.ml.job.config.AnalysisConfig; @@ -70,6 +78,7 @@ import org.elasticsearch.client.ml.job.util.PageParams; import org.elasticsearch.common.Strings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; @@ -80,6 +89,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; import static org.hamcrest.Matchers.equalTo; @@ -421,6 +431,40 @@ public void testGetModelSnapshots() throws IOException { } } + public void testUpdateModelSnapshot() throws IOException { + String jobId = randomAlphaOfLength(10); + String snapshotId = randomAlphaOfLength(10); + UpdateModelSnapshotRequest updateModelSnapshotRequest = new UpdateModelSnapshotRequest(jobId, snapshotId); + updateModelSnapshotRequest.setDescription("My First Snapshot"); + updateModelSnapshotRequest.setRetain(true); + + Request request = MLRequestConverters.updateModelSnapshot(updateModelSnapshotRequest); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/model_snapshots/" + snapshotId + "/_update", request.getEndpoint()); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) { + UpdateModelSnapshotRequest parsedRequest = UpdateModelSnapshotRequest.PARSER.apply(parser, null); + assertThat(parsedRequest, equalTo(updateModelSnapshotRequest)); + } + } + + public void testRevertModelSnapshot() throws IOException { + String jobId = randomAlphaOfLength(10); + String snapshotId = randomAlphaOfLength(10); + RevertModelSnapshotRequest revertModelSnapshotRequest = new RevertModelSnapshotRequest(jobId, snapshotId); + if (randomBoolean()) { + revertModelSnapshotRequest.setDeleteInterveningResults(randomBoolean()); + } + + Request request = MLRequestConverters.revertModelSnapshot(revertModelSnapshotRequest); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/anomaly_detectors/" + jobId + "/model_snapshots/" + snapshotId + "/_revert", + request.getEndpoint()); + try (XContentParser parser = 
createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) { + RevertModelSnapshotRequest parsedRequest = RevertModelSnapshotRequest.PARSER.apply(parser, null); + assertThat(parsedRequest, equalTo(revertModelSnapshotRequest)); + } + } + public void testGetOverallBuckets() throws IOException { String jobId = randomAlphaOfLength(10); GetOverallBucketsRequest getOverallBucketsRequest = new GetOverallBucketsRequest(jobId); @@ -518,6 +562,26 @@ public void testPutCalendar() throws IOException { } } + public void testPutCalendarJob() { + String calendarId = randomAlphaOfLength(10); + String job1 = randomAlphaOfLength(5); + String job2 = randomAlphaOfLength(5); + PutCalendarJobRequest putCalendarJobRequest = new PutCalendarJobRequest(calendarId, job1, job2); + Request request = MLRequestConverters.putCalendarJob(putCalendarJobRequest); + assertEquals(HttpPut.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/calendars/" + calendarId + "/jobs/" + job1 + "," + job2, request.getEndpoint()); + } + + public void testDeleteCalendarJob() { + String calendarId = randomAlphaOfLength(10); + String job1 = randomAlphaOfLength(5); + String job2 = randomAlphaOfLength(5); + DeleteCalendarJobRequest deleteCalendarJobRequest = new DeleteCalendarJobRequest(calendarId, job1, job2); + Request request = MLRequestConverters.deleteCalendarJob(deleteCalendarJobRequest); + assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/calendars/" + calendarId + "/jobs/" + job1 + "," + job2, request.getEndpoint()); + } + public void testGetCalendars() throws IOException { GetCalendarsRequest getCalendarsRequest = new GetCalendarsRequest(); String expectedEndpoint = "/_xpack/ml/calendars"; @@ -547,6 +611,39 @@ public void testDeleteCalendar() { assertEquals("/_xpack/ml/calendars/" + deleteCalendarRequest.getCalendarId(), request.getEndpoint()); } + public void testGetCalendarEvents() throws IOException { + String calendarId = randomAlphaOfLength(10); + GetCalendarEventsRequest getCalendarEventsRequest = new GetCalendarEventsRequest(calendarId); + getCalendarEventsRequest.setStart("2018-08-08T00:00:00Z"); + getCalendarEventsRequest.setEnd("2018-09-08T00:00:00Z"); + getCalendarEventsRequest.setPageParams(new PageParams(100, 300)); + getCalendarEventsRequest.setJobId(randomAlphaOfLength(10)); + + Request request = MLRequestConverters.getCalendarEvents(getCalendarEventsRequest); + assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/calendars/" + calendarId + "/events", request.getEndpoint()); + try (XContentParser parser = createParser(JsonXContent.jsonXContent, request.getEntity().getContent())) { + GetCalendarEventsRequest parsedRequest = GetCalendarEventsRequest.PARSER.apply(parser, null); + assertThat(parsedRequest, equalTo(getCalendarEventsRequest)); + } + } + + public void testPostCalendarEvent() throws Exception { + String calendarId = randomAlphaOfLength(10); + List events = Arrays.asList(ScheduledEventTests.testInstance(), + ScheduledEventTests.testInstance(), + ScheduledEventTests.testInstance()); + PostCalendarEventRequest postCalendarEventRequest = new PostCalendarEventRequest(calendarId, events); + + Request request = MLRequestConverters.postCalendarEvents(postCalendarEventRequest); + assertEquals(HttpPost.METHOD_NAME, request.getMethod()); + assertEquals("/_xpack/ml/calendars/" + calendarId + "/events", request.getEndpoint()); + + XContentBuilder builder = JsonXContent.contentBuilder(); + builder = 
postCalendarEventRequest.toXContent(builder, PostCalendarEventRequest.EXCLUDE_CALENDAR_ID_PARAMS); + assertEquals(Strings.toString(builder), requestEntityToString(request)); + } + public void testPutFilter() throws IOException { MlFilter filter = MlFilterTests.createRandomBuilder("foo").build(); PutFilterRequest putFilterRequest = new PutFilterRequest(filter); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java index 8fac48854e511..0f4f44b15a0ba 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/MachineLearningIT.java @@ -29,6 +29,7 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.ml.CloseJobRequest; import org.elasticsearch.client.ml.CloseJobResponse; +import org.elasticsearch.client.ml.DeleteCalendarJobRequest; import org.elasticsearch.client.ml.DeleteCalendarRequest; import org.elasticsearch.client.ml.DeleteDatafeedRequest; import org.elasticsearch.client.ml.DeleteFilterRequest; @@ -40,6 +41,8 @@ import org.elasticsearch.client.ml.FlushJobResponse; import org.elasticsearch.client.ml.ForecastJobRequest; import org.elasticsearch.client.ml.ForecastJobResponse; +import org.elasticsearch.client.ml.GetCalendarEventsRequest; +import org.elasticsearch.client.ml.GetCalendarEventsResponse; import org.elasticsearch.client.ml.GetCalendarsRequest; import org.elasticsearch.client.ml.GetCalendarsResponse; import org.elasticsearch.client.ml.GetDatafeedRequest; @@ -52,12 +55,17 @@ import org.elasticsearch.client.ml.GetJobResponse; import org.elasticsearch.client.ml.GetJobStatsRequest; import org.elasticsearch.client.ml.GetJobStatsResponse; +import org.elasticsearch.client.ml.GetModelSnapshotsRequest; +import org.elasticsearch.client.ml.GetModelSnapshotsResponse; import org.elasticsearch.client.ml.OpenJobRequest; import org.elasticsearch.client.ml.OpenJobResponse; +import org.elasticsearch.client.ml.PostCalendarEventRequest; +import org.elasticsearch.client.ml.PostCalendarEventResponse; import org.elasticsearch.client.ml.PostDataRequest; import org.elasticsearch.client.ml.PostDataResponse; import org.elasticsearch.client.ml.PreviewDatafeedRequest; import org.elasticsearch.client.ml.PreviewDatafeedResponse; +import org.elasticsearch.client.ml.PutCalendarJobRequest; import org.elasticsearch.client.ml.PutCalendarRequest; import org.elasticsearch.client.ml.PutCalendarResponse; import org.elasticsearch.client.ml.PutDatafeedRequest; @@ -66,6 +74,8 @@ import org.elasticsearch.client.ml.PutFilterResponse; import org.elasticsearch.client.ml.PutJobRequest; import org.elasticsearch.client.ml.PutJobResponse; +import org.elasticsearch.client.ml.RevertModelSnapshotRequest; +import org.elasticsearch.client.ml.RevertModelSnapshotResponse; import org.elasticsearch.client.ml.StartDatafeedRequest; import org.elasticsearch.client.ml.StartDatafeedResponse; import org.elasticsearch.client.ml.StopDatafeedRequest; @@ -73,8 +83,12 @@ import org.elasticsearch.client.ml.UpdateDatafeedRequest; import org.elasticsearch.client.ml.UpdateFilterRequest; import org.elasticsearch.client.ml.UpdateJobRequest; +import org.elasticsearch.client.ml.UpdateModelSnapshotRequest; +import org.elasticsearch.client.ml.UpdateModelSnapshotResponse; import org.elasticsearch.client.ml.calendars.Calendar; import 
org.elasticsearch.client.ml.calendars.CalendarTests; +import org.elasticsearch.client.ml.calendars.ScheduledEvent; +import org.elasticsearch.client.ml.calendars.ScheduledEventTests; import org.elasticsearch.client.ml.datafeed.DatafeedConfig; import org.elasticsearch.client.ml.datafeed.DatafeedState; import org.elasticsearch.client.ml.datafeed.DatafeedStats; @@ -86,7 +100,9 @@ import org.elasticsearch.client.ml.job.config.JobState; import org.elasticsearch.client.ml.job.config.JobUpdate; import org.elasticsearch.client.ml.job.config.MlFilter; +import org.elasticsearch.client.ml.job.process.ModelSnapshot; import org.elasticsearch.client.ml.job.stats.JobStats; +import org.elasticsearch.client.ml.job.util.PageParams; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.rest.RestStatus; @@ -826,6 +842,49 @@ public void testPutCalendar() throws IOException { assertThat(putCalendarResponse.getCalendar(), equalTo(calendar)); } + public void testPutCalendarJob() throws IOException { + Calendar calendar = new Calendar("put-calendar-job-id", Collections.singletonList("put-calendar-job-0"), null); + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + PutCalendarResponse putCalendarResponse = + machineLearningClient.putCalendar(new PutCalendarRequest(calendar), RequestOptions.DEFAULT); + + assertThat(putCalendarResponse.getCalendar().getJobIds(), containsInAnyOrder( "put-calendar-job-0")); + + String jobId1 = "put-calendar-job-1"; + String jobId2 = "put-calendar-job-2"; + + PutCalendarJobRequest putCalendarJobRequest = new PutCalendarJobRequest(calendar.getId(), jobId1, jobId2); + + putCalendarResponse = execute(putCalendarJobRequest, + machineLearningClient::putCalendarJob, + machineLearningClient::putCalendarJobAsync); + + assertThat(putCalendarResponse.getCalendar().getJobIds(), containsInAnyOrder(jobId1, jobId2, "put-calendar-job-0")); + } + + public void testDeleteCalendarJob() throws IOException { + Calendar calendar = new Calendar("del-calendar-job-id", + Arrays.asList("del-calendar-job-0", "del-calendar-job-1", "del-calendar-job-2"), + null); + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + PutCalendarResponse putCalendarResponse = + machineLearningClient.putCalendar(new PutCalendarRequest(calendar), RequestOptions.DEFAULT); + + assertThat(putCalendarResponse.getCalendar().getJobIds(), + containsInAnyOrder("del-calendar-job-0", "del-calendar-job-1", "del-calendar-job-2")); + + String jobId1 = "del-calendar-job-0"; + String jobId2 = "del-calendar-job-2"; + + DeleteCalendarJobRequest deleteCalendarJobRequest = new DeleteCalendarJobRequest(calendar.getId(), jobId1, jobId2); + + putCalendarResponse = execute(deleteCalendarJobRequest, + machineLearningClient::deleteCalendarJob, + machineLearningClient::deleteCalendarJobAsync); + + assertThat(putCalendarResponse.getCalendar().getJobIds(), containsInAnyOrder("del-calendar-job-1")); + } + public void testGetCalendars() throws Exception { Calendar calendar1 = CalendarTests.testInstance(); Calendar calendar2 = CalendarTests.testInstance(); @@ -868,6 +927,65 @@ public void testDeleteCalendar() throws IOException { assertThat(exception.status().getStatus(), equalTo(404)); } + public void testGetCalendarEvent() throws Exception { + Calendar calendar = new Calendar("get-calendar-event-id", Collections.singletonList("get-calendar-event-job"), null); + MachineLearningClient machineLearningClient = 
highLevelClient().machineLearning(); + machineLearningClient.putCalendar(new PutCalendarRequest(calendar), RequestOptions.DEFAULT); + + List events = new ArrayList<>(3); + for (int i = 0; i < 3; i++) { + events.add(ScheduledEventTests.testInstance(calendar.getId(), null)); + } + machineLearningClient.postCalendarEvent(new PostCalendarEventRequest(calendar.getId(), events), RequestOptions.DEFAULT); + + { + GetCalendarEventsRequest getCalendarEventsRequest = new GetCalendarEventsRequest(calendar.getId()); + + GetCalendarEventsResponse getCalendarEventsResponse = execute(getCalendarEventsRequest, + machineLearningClient::getCalendarEvents, + machineLearningClient::getCalendarEventsAsync); + assertThat(getCalendarEventsResponse.events().size(), equalTo(3)); + assertThat(getCalendarEventsResponse.count(), equalTo(3L)); + } + { + GetCalendarEventsRequest getCalendarEventsRequest = new GetCalendarEventsRequest(calendar.getId()); + getCalendarEventsRequest.setPageParams(new PageParams(1, 2)); + GetCalendarEventsResponse getCalendarEventsResponse = execute(getCalendarEventsRequest, + machineLearningClient::getCalendarEvents, + machineLearningClient::getCalendarEventsAsync); + assertThat(getCalendarEventsResponse.events().size(), equalTo(2)); + assertThat(getCalendarEventsResponse.count(), equalTo(3L)); + } + { + machineLearningClient.putJob(new PutJobRequest(buildJob("get-calendar-event-job")), RequestOptions.DEFAULT); + GetCalendarEventsRequest getCalendarEventsRequest = new GetCalendarEventsRequest("_all"); + getCalendarEventsRequest.setJobId("get-calendar-event-job"); + GetCalendarEventsResponse getCalendarEventsResponse = execute(getCalendarEventsRequest, + machineLearningClient::getCalendarEvents, + machineLearningClient::getCalendarEventsAsync); + assertThat(getCalendarEventsResponse.events().size(), equalTo(3)); + assertThat(getCalendarEventsResponse.count(), equalTo(3L)); + } + } + + public void testPostCalendarEvent() throws Exception { + Calendar calendar = CalendarTests.testInstance(); + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + machineLearningClient.putCalendar(new PutCalendarRequest(calendar), RequestOptions.DEFAULT); + + List events = new ArrayList<>(3); + for (int i = 0; i < 3; i++) { + events.add(ScheduledEventTests.testInstance(calendar.getId(), null)); + } + + PostCalendarEventRequest postCalendarEventRequest = new PostCalendarEventRequest(calendar.getId(), events); + + PostCalendarEventResponse postCalendarEventResponse = execute(postCalendarEventRequest, + machineLearningClient::postCalendarEvent, + machineLearningClient::postCalendarEventAsync); + assertThat(postCalendarEventResponse.getScheduledEvents(), containsInAnyOrder(events.toArray())); + } + public void testPutFilter() throws Exception { String filterId = "filter-job-test"; MlFilter mlFilter = MlFilter.builder(filterId) @@ -1026,10 +1144,11 @@ private String createAndPutDatafeed(String jobId, String indexName) throws IOExc } public void createModelSnapshot(String jobId, String snapshotId) throws IOException { + String documentId = jobId + "_model_snapshot_" + snapshotId; Job job = MachineLearningIT.buildJob(jobId); highLevelClient().machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); - IndexRequest indexRequest = new IndexRequest(".ml-anomalies-shared", "doc"); + IndexRequest indexRequest = new IndexRequest(".ml-anomalies-shared", "doc", documentId); indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); indexRequest.source("{\"job_id\":\"" + 
jobId + "\", \"timestamp\":1541587919000, " + "\"description\":\"State persisted due to job close at 2018-11-07T10:51:59+0000\", " + @@ -1043,6 +1162,28 @@ public void createModelSnapshot(String jobId, String snapshotId) throws IOExcept highLevelClient().index(indexRequest, RequestOptions.DEFAULT); } + public void createModelSnapshots(String jobId, List snapshotIds) throws IOException { + Job job = MachineLearningIT.buildJob(jobId); + highLevelClient().machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + + for(String snapshotId : snapshotIds) { + String documentId = jobId + "_model_snapshot_" + snapshotId; + IndexRequest indexRequest = new IndexRequest(".ml-anomalies-shared", "doc", documentId); + indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + indexRequest.source("{\"job_id\":\"" + jobId + "\", \"timestamp\":1541587919000, " + + "\"description\":\"State persisted due to job close at 2018-11-07T10:51:59+0000\", " + + "\"snapshot_id\":\"" + snapshotId + "\", \"snapshot_doc_count\":1, \"model_size_stats\":{" + + "\"job_id\":\"" + jobId + "\", \"result_type\":\"model_size_stats\",\"model_bytes\":51722, " + + "\"total_by_field_count\":3, \"total_over_field_count\":0, \"total_partition_field_count\":2," + + "\"bucket_allocation_failures_count\":0, \"memory_status\":\"ok\", \"log_time\":1541587919000, " + + "\"timestamp\":1519930800000}, \"latest_record_time_stamp\":1519931700000," + + "\"latest_result_time_stamp\":1519930800000, \"retain\":false, " + + "\"quantiles\":{\"job_id\":\""+jobId+"\", \"timestamp\":1541587919000, " + + "\"quantile_state\":\"state\"}}", XContentType.JSON); + highLevelClient().index(indexRequest, RequestOptions.DEFAULT); + } + } + public void testDeleteModelSnapshot() throws IOException { String jobId = "test-delete-model-snapshot"; String snapshotId = "1541587919"; @@ -1058,4 +1199,72 @@ public void testDeleteModelSnapshot() throws IOException { assertTrue(response.isAcknowledged()); } + + public void testUpdateModelSnapshot() throws Exception { + String jobId = "test-update-model-snapshot"; + + String snapshotId = "1541587919"; + createModelSnapshot(jobId, snapshotId); + + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + + GetModelSnapshotsRequest getModelSnapshotsRequest = new GetModelSnapshotsRequest(jobId); + + GetModelSnapshotsResponse getModelSnapshotsResponse1 = execute(getModelSnapshotsRequest, machineLearningClient::getModelSnapshots, + machineLearningClient::getModelSnapshotsAsync); + + assertEquals(getModelSnapshotsResponse1.count(), 1L); + assertEquals("State persisted due to job close at 2018-11-07T10:51:59+0000", + getModelSnapshotsResponse1.snapshots().get(0).getDescription()); + + UpdateModelSnapshotRequest request = new UpdateModelSnapshotRequest(jobId, snapshotId); + request.setDescription("Updated description"); + request.setRetain(true); + + UpdateModelSnapshotResponse response = execute(request, machineLearningClient::updateModelSnapshot, + machineLearningClient::updateModelSnapshotAsync); + + assertTrue(response.getAcknowledged()); + assertEquals("Updated description", response.getModel().getDescription()); + assertTrue(response.getModel().getRetain()); + + GetModelSnapshotsResponse getModelSnapshotsResponse2 = execute(getModelSnapshotsRequest, machineLearningClient::getModelSnapshots, + machineLearningClient::getModelSnapshotsAsync); + + assertEquals(getModelSnapshotsResponse2.count(), 1L); + assertEquals("Updated description", + 
getModelSnapshotsResponse2.snapshots().get(0).getDescription()); + } + + public void testRevertModelSnapshot() throws IOException { + String jobId = "test-revert-model-snapshot"; + + List snapshotIds = new ArrayList<>(); + + String snapshotId1 = "1541587919"; + String snapshotId2 = "1541588919"; + String snapshotId3 = "1541589919"; + + snapshotIds.add(snapshotId1); + snapshotIds.add(snapshotId2); + snapshotIds.add(snapshotId3); + + createModelSnapshots(jobId, snapshotIds); + + MachineLearningClient machineLearningClient = highLevelClient().machineLearning(); + + for (String snapshotId : snapshotIds){ + RevertModelSnapshotRequest request = new RevertModelSnapshotRequest(jobId, snapshotId); + if (randomBoolean()) { + request.setDeleteInterveningResults(randomBoolean()); + } + + RevertModelSnapshotResponse response = execute(request, machineLearningClient::revertModelSnapshot, + machineLearningClient::revertModelSnapshotAsync); + + ModelSnapshot model = response.getModel(); + + assertEquals(snapshotId, model.getSnapshotId()); + } + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java index afc5e99b5f03a..f94cc41432c4c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java @@ -106,15 +106,19 @@ public void testReindexTask() throws IOException, InterruptedException { ); } { - ReindexRequest reindexRequest = new ReindexRequest(); + // tag::submit-reindex-task + ReindexRequest reindexRequest = new ReindexRequest(); // <1> reindexRequest.setSourceIndices(sourceIndex); reindexRequest.setDestIndex(destinationIndex); - reindexRequest.setSourceQuery(new IdsQueryBuilder().addIds("1").types("type")); reindexRequest.setRefresh(true); - TaskSubmissionResponse reindexSubmission = highLevelClient().submitReindexTask(reindexRequest, RequestOptions.DEFAULT); + TaskSubmissionResponse reindexSubmission = highLevelClient() + .submitReindexTask(reindexRequest, RequestOptions.DEFAULT); // <2> + + String taskId = reindexSubmission.getTask(); // <3> + // end::submit-reindex-task - BooleanSupplier hasUpgradeCompleted = checkCompletionStatus(reindexSubmission.getTask()); + BooleanSupplier hasUpgradeCompleted = checkCompletionStatus(taskId); awaitBusy(hasUpgradeCompleted); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index c2238f88454f5..21147446039f5 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -53,6 +53,7 @@ import org.elasticsearch.action.support.master.MasterNodeReadRequest; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.action.support.replication.ReplicationRequest; +import org.elasticsearch.client.core.MultiTermVectorsRequest; import org.elasticsearch.client.core.TermVectorsRequest; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.client.RequestConverters.EndpointBuilder; @@ -101,6 +102,7 @@ import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.RandomObjects; +import org.hamcrest.Matchers; import java.io.IOException; import java.io.InputStream; 
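For context, the submit-reindex-task snippet added in ReindexIT above might be used along these lines (a minimal sketch, assuming a RestHighLevelClient named client and hypothetical index names; ReindexRequest and TaskSubmissionResponse are the types already used by that test):

ReindexRequest reindexRequest = new ReindexRequest();
reindexRequest.setSourceIndices("source-index");      // hypothetical source index
reindexRequest.setDestIndex("dest-index");            // hypothetical destination index
reindexRequest.setRefresh(true);                      // make the copied documents searchable once written
TaskSubmissionResponse submission =
    client.submitReindexTask(reindexRequest, RequestOptions.DEFAULT); // returns immediately with a task id
String taskId = submission.getTask();                 // poll this id via the tasks API until the reindex finishes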
@@ -890,6 +892,21 @@ public void testBulkWithDifferentContentTypes() throws IOException { } } + public void testGlobalPipelineOnBulkRequest() throws IOException { + BulkRequest bulkRequest = new BulkRequest(); + bulkRequest.pipeline("xyz"); + bulkRequest.add(new IndexRequest("test", "doc", "11") + .source(XContentType.JSON, "field", "bulk1")); + bulkRequest.add(new IndexRequest("test", "doc", "12") + .source(XContentType.JSON, "field", "bulk2")); + bulkRequest.add(new IndexRequest("test", "doc", "13") + .source(XContentType.JSON, "field", "bulk3")); + + Request request = RequestConverters.bulk(bulkRequest); + + assertThat(request.getParameters(), Matchers.hasEntry("pipeline","xyz")); + } + public void testSearchNullSource() throws IOException { SearchRequest searchRequest = new SearchRequest(); Request request = RequestConverters.search(searchRequest); @@ -1315,6 +1332,26 @@ public void testTermVectors() throws IOException { assertToXContentBody(tvRequest, request.getEntity()); } + public void testMultiTermVectors() throws IOException { + MultiTermVectorsRequest mtvRequest = new MultiTermVectorsRequest(); + + int numberOfRequests = randomIntBetween(0, 5); + for (int i = 0; i < numberOfRequests; i++) { + String index = randomAlphaOfLengthBetween(3, 10); + String type = randomAlphaOfLengthBetween(3, 10); + String id = randomAlphaOfLengthBetween(3, 10); + TermVectorsRequest tvRequest = new TermVectorsRequest(index, type, id); + String[] fields = generateRandomStringArray(10, 5, false, false); + tvRequest.setFields(fields); + mtvRequest.add(tvRequest); + } + + Request request = RequestConverters.mtermVectors(mtvRequest); + assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + assertEquals("_mtermvectors", request.getEndpoint()); + assertToXContentBody(mtvRequest, request.getEntity()); + } + public void testFieldCaps() { // Create a random request. String[] indices = randomIndicesNames(0, 5); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java index b433be3f5c513..e9f45b843f94d 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RestHighLevelClientTests.java @@ -689,7 +689,6 @@ public void testApiNamingConventions() throws Exception { "indices.exists_type", "indices.get_upgrade", "indices.put_alias", - "mtermvectors", "render_search_template", "scripts_painless_execute" }; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java index 4616234f14509..616850c513af7 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java @@ -144,6 +144,14 @@ public void testCreateSnapshot() throws IOException { CreateSnapshotResponse response = createTestSnapshot(request); assertEquals(waitForCompletion ? 
RestStatus.OK : RestStatus.ACCEPTED, response.status()); + if (waitForCompletion == false) { + // If we don't wait for the snapshot to complete we have to cancel it to not leak the snapshot task + AcknowledgedResponse deleteResponse = execute( + new DeleteSnapshotRequest(repository, snapshot), + highLevelClient().snapshot()::delete, highLevelClient().snapshot()::deleteAsync + ); + assertTrue(deleteResponse.isAcknowledged()); + } } public void testGetSnapshots() throws IOException { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/ResumeFollowRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/ResumeFollowRequestTests.java new file mode 100644 index 0000000000000..3f00891331839 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ccr/ResumeFollowRequestTests.java @@ -0,0 +1,116 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.ccr; + +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +public class ResumeFollowRequestTests extends AbstractXContentTestCase { + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("test_parser", + true, (args) -> new ResumeFollowRequest((String) args[0])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), PutFollowRequest.FOLLOWER_INDEX_FIELD); + PARSER.declareInt(ResumeFollowRequest::setMaxReadRequestOperationCount, FollowConfig.MAX_READ_REQUEST_OPERATION_COUNT); + PARSER.declareField( + ResumeFollowRequest::setMaxReadRequestSize, + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), FollowConfig.MAX_READ_REQUEST_SIZE.getPreferredName()), + PutFollowRequest.MAX_READ_REQUEST_SIZE, + ObjectParser.ValueType.STRING); + PARSER.declareInt(ResumeFollowRequest::setMaxOutstandingReadRequests, FollowConfig.MAX_OUTSTANDING_READ_REQUESTS); + PARSER.declareInt(ResumeFollowRequest::setMaxWriteRequestOperationCount, FollowConfig.MAX_WRITE_REQUEST_OPERATION_COUNT); + PARSER.declareField( + ResumeFollowRequest::setMaxWriteRequestSize, + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), FollowConfig.MAX_WRITE_REQUEST_SIZE.getPreferredName()), + PutFollowRequest.MAX_WRITE_REQUEST_SIZE, + ObjectParser.ValueType.STRING); + PARSER.declareInt(ResumeFollowRequest::setMaxOutstandingWriteRequests, FollowConfig.MAX_OUTSTANDING_WRITE_REQUESTS); + PARSER.declareInt(ResumeFollowRequest::setMaxWriteBufferCount, 
FollowConfig.MAX_WRITE_BUFFER_COUNT); + PARSER.declareField( + ResumeFollowRequest::setMaxWriteBufferSize, + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), FollowConfig.MAX_WRITE_BUFFER_SIZE.getPreferredName()), + PutFollowRequest.MAX_WRITE_BUFFER_SIZE, + ObjectParser.ValueType.STRING); + PARSER.declareField( + ResumeFollowRequest::setMaxRetryDelay, + (p, c) -> TimeValue.parseTimeValue(p.text(), FollowConfig.MAX_RETRY_DELAY_FIELD.getPreferredName()), + PutFollowRequest.MAX_RETRY_DELAY_FIELD, + ObjectParser.ValueType.STRING); + PARSER.declareField( + ResumeFollowRequest::setReadPollTimeout, + (p, c) -> TimeValue.parseTimeValue(p.text(), FollowConfig.READ_POLL_TIMEOUT.getPreferredName()), + PutFollowRequest.READ_POLL_TIMEOUT, + ObjectParser.ValueType.STRING); + } + + @Override + protected ResumeFollowRequest doParseInstance(XContentParser parser) throws IOException { + return PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected ResumeFollowRequest createTestInstance() { + ResumeFollowRequest resumeFollowRequest = new ResumeFollowRequest(randomAlphaOfLength(4)); + if (randomBoolean()) { + resumeFollowRequest.setMaxOutstandingReadRequests(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + resumeFollowRequest.setMaxOutstandingWriteRequests(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + resumeFollowRequest.setMaxReadRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + resumeFollowRequest.setMaxReadRequestSize(new ByteSizeValue(randomNonNegativeLong())); + } + if (randomBoolean()) { + resumeFollowRequest.setMaxWriteBufferCount(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + resumeFollowRequest.setMaxWriteBufferSize(new ByteSizeValue(randomNonNegativeLong())); + } + if (randomBoolean()) { + resumeFollowRequest.setMaxWriteRequestOperationCount(randomIntBetween(0, Integer.MAX_VALUE)); + } + if (randomBoolean()) { + resumeFollowRequest.setMaxWriteRequestSize(new ByteSizeValue(randomNonNegativeLong())); + } + if (randomBoolean()) { + resumeFollowRequest.setMaxRetryDelay(new TimeValue(randomNonNegativeLong())); + } + if (randomBoolean()) { + resumeFollowRequest.setReadPollTimeout(new TimeValue(randomNonNegativeLong())); + } + return resumeFollowRequest; + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/core/MultiTermVectorsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/MultiTermVectorsResponseTests.java new file mode 100644 index 0000000000000..6db3139fbc569 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/MultiTermVectorsResponseTests.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.core; + +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; + +public class MultiTermVectorsResponseTests extends ESTestCase { + + public void testFromXContent() throws IOException { + xContentTester( + this::createParser, + this::createTestInstance, + this::toXContent, + MultiTermVectorsResponse::fromXContent) + .supportsUnknownFields(true) + .randomFieldsExcludeFilter(field -> + field.endsWith("term_vectors") || field.endsWith("terms") || field.endsWith("tokens")) + .test(); + } + + private void toXContent(MultiTermVectorsResponse response, XContentBuilder builder) throws IOException { + builder.startObject(); + List termVectorsResponseList = response.getTermVectorsResponses(); + if (termVectorsResponseList != null) { + builder.startArray("docs"); + for (TermVectorsResponse tvr : termVectorsResponseList) { + TermVectorsResponseTests.toXContent(tvr, builder); + } + builder.endArray(); + } + builder.endObject(); + } + + protected MultiTermVectorsResponse createTestInstance() { + int numberOfResponses = randomIntBetween(0, 5); + List responses = new ArrayList<>(numberOfResponses); + for (int i = 0; i < numberOfResponses; i++) { + TermVectorsResponse tvResponse = TermVectorsResponseTests.createTestInstance(); + responses.add(tvResponse); + } + return new MultiTermVectorsResponse(responses); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/core/TermVectorsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/TermVectorsResponseTests.java index 473bb34f4e660..714a7269a19d9 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/core/TermVectorsResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/TermVectorsResponseTests.java @@ -35,8 +35,8 @@ public class TermVectorsResponseTests extends ESTestCase { public void testFromXContent() throws IOException { xContentTester( this::createParser, - this::createTestInstance, - this::toXContent, + TermVectorsResponseTests::createTestInstance, + TermVectorsResponseTests::toXContent, TermVectorsResponse::fromXContent) .supportsUnknownFields(true) .randomFieldsExcludeFilter(field -> @@ -44,7 +44,7 @@ public void testFromXContent() throws IOException { .test(); } - private void toXContent(TermVectorsResponse response, XContentBuilder builder) throws IOException { + static void toXContent(TermVectorsResponse response, XContentBuilder builder) throws IOException { builder.startObject(); builder.field("_index", response.getIndex()); builder.field("_type", response.getType()); @@ -66,7 +66,7 @@ private void toXContent(TermVectorsResponse response, XContentBuilder builder) t builder.endObject(); } - private void toXContent(TermVectorsResponse.TermVector tv, XContentBuilder builder) throws IOException { + private static void toXContent(TermVectorsResponse.TermVector tv, XContentBuilder builder) throws IOException { builder.startObject(tv.getFieldName()); // build fields_statistics if (tv.getFieldStatistics() != null) { @@ -117,7 +117,7 @@ private void toXContent(TermVectorsResponse.TermVector tv, XContentBuilder build } - protected TermVectorsResponse createTestInstance() { + static 
TermVectorsResponse createTestInstance() { String index = randomAlphaOfLength(5); String type = randomAlphaOfLength(5); String id = String.valueOf(randomIntBetween(1,100)); @@ -148,7 +148,7 @@ protected TermVectorsResponse createTestInstance() { - private TermVectorsResponse.TermVector randomTermVector(String fieldName, boolean hasFieldStatistics, boolean hasTermStatistics, + private static TermVectorsResponse.TermVector randomTermVector(String fieldName, boolean hasFieldStatistics, boolean hasTermStatistics, boolean hasScores, boolean hasOffsets, boolean hasPositions, boolean hasPayloads) { TermVectorsResponse.TermVector.FieldStatistics fs = null; if (hasFieldStatistics) { @@ -171,7 +171,7 @@ private TermVectorsResponse.TermVector randomTermVector(String fieldName, boolea return tv; } - private TermVectorsResponse.TermVector.Term randomTerm(String termTxt, boolean hasTermStatistics, boolean hasScores, + private static TermVectorsResponse.TermVector.Term randomTerm(String termTxt, boolean hasTermStatistics, boolean hasScores, boolean hasOffsets, boolean hasPositions, boolean hasPayloads) { int termFreq = randomInt(10000); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java index 8df7e40fc9e77..4dce147925c8a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; @@ -35,6 +36,8 @@ import org.elasticsearch.client.ccr.PauseFollowRequest; import org.elasticsearch.client.ccr.PutFollowRequest; import org.elasticsearch.client.ccr.PutFollowResponse; +import org.elasticsearch.client.ccr.ResumeFollowRequest; +import org.elasticsearch.client.ccr.UnfollowRequest; import org.elasticsearch.client.core.AcknowledgedResponse; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.json.JsonXContent; @@ -46,7 +49,6 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; public class CCRDocumentationIT extends ESRestHighLevelClientTestCase { @@ -198,11 +200,9 @@ public void onFailure(Exception e) { // Resume follow index, so that it can be paused again: { - // TODO: Replace this with high level rest client code when resume follow API is available: - final Request req = new Request("POST", "/" + followIndex + "/_ccr/resume_follow"); - req.setJsonEntity("{}"); - Response res = client().performRequest(req); - assertThat(res.getStatusLine().getStatusCode(), equalTo(200)); + ResumeFollowRequest resumeFollowRequest = new ResumeFollowRequest(followIndex); + AcknowledgedResponse resumeResponse = client.ccr().resumeFollow(resumeFollowRequest, RequestOptions.DEFAULT); + assertThat(resumeResponse.isAcknowledged(), 
is(true)); } // Replace the empty listener by a blocking listener in test @@ -217,6 +217,164 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } + public void testResumeFollow() throws Exception { + RestHighLevelClient client = highLevelClient(); + { + // Create leader index: + CreateIndexRequest createIndexRequest = new CreateIndexRequest("leader"); + createIndexRequest.settings(Collections.singletonMap("index.soft_deletes.enabled", true)); + CreateIndexResponse response = client.indices().create(createIndexRequest, RequestOptions.DEFAULT); + assertThat(response.isAcknowledged(), is(true)); + } + String followIndex = "follower"; + // Follow index, so that it can be paused: + { + PutFollowRequest putFollowRequest = new PutFollowRequest("local", "leader", followIndex); + PutFollowResponse putFollowResponse = client.ccr().putFollow(putFollowRequest, RequestOptions.DEFAULT); + assertThat(putFollowResponse.isFollowIndexCreated(), is(true)); + assertThat(putFollowResponse.isFollowIndexShardsAcked(), is(true)); + assertThat(putFollowResponse.isIndexFollowingStarted(), is(true)); + } + + // Pause follow index, so that it can be resumed: + { + PauseFollowRequest pauseFollowRequest = new PauseFollowRequest(followIndex); + AcknowledgedResponse pauseResponse = client.ccr().pauseFollow(pauseFollowRequest, RequestOptions.DEFAULT); + assertThat(pauseResponse.isAcknowledged(), is(true)); + } + + // tag::ccr-resume-follow-request + ResumeFollowRequest request = new ResumeFollowRequest(followIndex); // <1> + // end::ccr-resume-follow-request + + // tag::ccr-resume-follow-execute + AcknowledgedResponse response = + client.ccr().resumeFollow(request, RequestOptions.DEFAULT); + // end::ccr-resume-follow-execute + + // tag::ccr-resume-follow-response + boolean acknowledged = response.isAcknowledged(); // <1> + // end::ccr-resume-follow-response + + // Pause follow index, so that it can be resumed again: + { + PauseFollowRequest pauseFollowRequest = new PauseFollowRequest(followIndex); + AcknowledgedResponse pauseResponse = client.ccr().pauseFollow(pauseFollowRequest, RequestOptions.DEFAULT); + assertThat(pauseResponse.isAcknowledged(), is(true)); + } + + // tag::ccr-resume-follow-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(AcknowledgedResponse response) { + boolean acknowledged = response.isAcknowledged(); // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::ccr-resume-follow-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::ccr-resume-follow-execute-async + client.ccr() + .resumeFollowAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::ccr-resume-follow-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + + public void testUnfollow() throws Exception { + RestHighLevelClient client = highLevelClient(); + { + // Create leader index: + CreateIndexRequest createIndexRequest = new CreateIndexRequest("leader"); + createIndexRequest.settings(Collections.singletonMap("index.soft_deletes.enabled", true)); + CreateIndexResponse response = client.indices().create(createIndexRequest, RequestOptions.DEFAULT); + assertThat(response.isAcknowledged(), is(true)); + } + String followIndex = "follower"; + // Follow index, pause and close, so that it can be unfollowed: + { + PutFollowRequest 
putFollowRequest = new PutFollowRequest("local", "leader", followIndex); + PutFollowResponse putFollowResponse = client.ccr().putFollow(putFollowRequest, RequestOptions.DEFAULT); + assertThat(putFollowResponse.isFollowIndexCreated(), is(true)); + assertThat(putFollowResponse.isFollowIndexShardsAcked(), is(true)); + assertThat(putFollowResponse.isIndexFollowingStarted(), is(true)); + + PauseFollowRequest pauseFollowRequest = new PauseFollowRequest(followIndex); + AcknowledgedResponse unfollowResponse = client.ccr().pauseFollow(pauseFollowRequest, RequestOptions.DEFAULT); + assertThat(unfollowResponse.isAcknowledged(), is(true)); + + CloseIndexRequest closeIndexRequest = new CloseIndexRequest(followIndex); + assertThat(client.indices().close(closeIndexRequest, RequestOptions.DEFAULT).isAcknowledged(), is(true)); + } + + // tag::ccr-unfollow-request + UnfollowRequest request = new UnfollowRequest(followIndex); // <1> + // end::ccr-unfollow-request + + // tag::ccr-unfollow-execute + AcknowledgedResponse response = + client.ccr().unfollow(request, RequestOptions.DEFAULT); + // end::ccr-unfollow-execute + + // tag::ccr-unfollow-response + boolean acknowledged = response.isAcknowledged(); // <1> + // end::ccr-unfollow-response + + // Delete, put follow index, pause and close, so that it can be unfollowed again: + { + DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(followIndex); + assertThat(client.indices().delete(deleteIndexRequest, RequestOptions.DEFAULT).isAcknowledged(), is(true)); + + PutFollowRequest putFollowRequest = new PutFollowRequest("local", "leader", followIndex); + PutFollowResponse putFollowResponse = client.ccr().putFollow(putFollowRequest, RequestOptions.DEFAULT); + assertThat(putFollowResponse.isFollowIndexCreated(), is(true)); + assertThat(putFollowResponse.isFollowIndexShardsAcked(), is(true)); + assertThat(putFollowResponse.isIndexFollowingStarted(), is(true)); + + PauseFollowRequest pauseFollowRequest = new PauseFollowRequest(followIndex); + AcknowledgedResponse unfollowResponse = client.ccr().pauseFollow(pauseFollowRequest, RequestOptions.DEFAULT); + assertThat(unfollowResponse.isAcknowledged(), is(true)); + + CloseIndexRequest closeIndexRequest = new CloseIndexRequest(followIndex); + assertThat(client.indices().close(closeIndexRequest, RequestOptions.DEFAULT).isAcknowledged(), is(true)); + } + + // tag::ccr-unfollow-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(AcknowledgedResponse response) { + boolean acknowledged = response.isAcknowledged(); // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::ccr-unfollow-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::ccr-unfollow-execute-async + client.ccr() + .unfollowAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::ccr-unfollow-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + static Map toMap(Response response) throws IOException { return XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java index 41036f900f411..adc38de06e988 100644 --- 
a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java @@ -54,6 +54,8 @@ import org.elasticsearch.client.Response; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.client.RethrottleRequest; +import org.elasticsearch.client.core.MultiTermVectorsRequest; +import org.elasticsearch.client.core.MultiTermVectorsResponse; import org.elasticsearch.client.core.TermVectorsRequest; import org.elasticsearch.client.core.TermVectorsResponse; import org.elasticsearch.common.Strings; @@ -755,6 +757,16 @@ public void testBulk() throws Exception { request.waitForActiveShards(2); // <1> request.waitForActiveShards(ActiveShardCount.ALL); // <2> // end::bulk-request-active-shards + // tag::bulk-request-pipeline + request.pipeline("pipelineId"); // <1> + // end::bulk-request-pipeline + // tag::bulk-request-routing + request.routing("routingId"); // <1> + // end::bulk-request-routing + + // tag::bulk-request-index-type + BulkRequest defaulted = new BulkRequest("posts", "_doc"); // <1> + // end::bulk-request-index-type // tag::bulk-execute-listener ActionListener<BulkResponse> listener = new ActionListener<BulkResponse>() { @@ -1667,6 +1679,80 @@ public void onFailure(Exception e) { } + + // Not entirely sure that _mtermvectors belongs to CRUD, but in the absence of a better place the example lives here + public void testMultiTermVectors() throws Exception { + RestHighLevelClient client = highLevelClient(); + CreateIndexRequest authorsRequest = new CreateIndexRequest("authors").mapping("_doc", "user", "type=text"); + CreateIndexResponse authorsResponse = client.indices().create(authorsRequest, RequestOptions.DEFAULT); + assertTrue(authorsResponse.isAcknowledged()); + client.index(new IndexRequest("authors", "_doc", "1").source("user", "kimchy"), RequestOptions.DEFAULT); + client.index(new IndexRequest("authors", "_doc", "2").source("user", "s1monw"), RequestOptions.DEFAULT); + Response refreshResponse = client().performRequest(new Request("POST", "/authors/_refresh")); + assertEquals(200, refreshResponse.getStatusLine().getStatusCode()); + + { + // tag::multi-term-vectors-request + MultiTermVectorsRequest request = new MultiTermVectorsRequest(); // <1> + TermVectorsRequest tvrequest1 = + new TermVectorsRequest("authors", "_doc", "1"); + tvrequest1.setFields("user"); + request.add(tvrequest1); // <2> + + XContentBuilder docBuilder = XContentFactory.jsonBuilder(); + docBuilder.startObject().field("user", "guest-user").endObject(); + TermVectorsRequest tvrequest2 = + new TermVectorsRequest("authors", "_doc", docBuilder); + request.add(tvrequest2); // <3> + // end::multi-term-vectors-request + } + + // tag::multi-term-vectors-request-template + TermVectorsRequest tvrequestTemplate = + new TermVectorsRequest("authors", "_doc", "fake_id"); // <1> + tvrequestTemplate.setFields("user"); + String[] ids = {"1", "2"}; + MultiTermVectorsRequest request = + new MultiTermVectorsRequest(ids, tvrequestTemplate); // <2> + // end::multi-term-vectors-request-template + + // tag::multi-term-vectors-execute + MultiTermVectorsResponse response = + client.mtermvectors(request, RequestOptions.DEFAULT); + // end::multi-term-vectors-execute + + // tag::multi-term-vectors-response + List<TermVectorsResponse> tvresponseList = + response.getTermVectorsResponses(); // <1> + if (tvresponseList != null) { + for (TermVectorsResponse tvresponse : tvresponseList) { + } + } + // end::multi-term-vectors-response + +
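// A sketch of what the intentionally empty loop in the snippet above would typically do.
// getIndex() and the TermVector getters mirror the accessors exercised in TermVectorsResponseTests;
// getTermVectorsList() is assumed here rather than shown in this change.
for (TermVectorsResponse tvresponse : response.getTermVectorsResponses()) {
    String docIndex = tvresponse.getIndex();               // index the term vectors were computed against
    for (TermVectorsResponse.TermVector tv : tvresponse.getTermVectorsList()) {
        String fieldName = tv.getFieldName();              // field the vectors were built for
        // field statistics and per-term statistics are only present if they were requested
    }
}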
ActionListener listener; + // tag::multi-term-vectors-execute-listener + listener = new ActionListener() { + @Override + public void onResponse(MultiTermVectorsResponse mtvResponse) { + // <1> + } + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::multi-term-vectors-execute-listener + CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + // tag::multi-term-vectors-execute-async + client.mtermvectorsAsync( + request, RequestOptions.DEFAULT, listener); // <1> + // end::multi-term-vectors-execute-async + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + + } + public void testMultiGet() throws Exception { RestHighLevelClient client = highLevelClient(); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ILMDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ILMDocumentationIT.java index 4ff50ba33e752..712944219acd3 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ILMDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ILMDocumentationIT.java @@ -22,6 +22,7 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.Response; @@ -29,21 +30,31 @@ import org.elasticsearch.client.core.AcknowledgedResponse; import org.elasticsearch.client.indexlifecycle.DeleteAction; import org.elasticsearch.client.indexlifecycle.DeleteLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.ExplainLifecycleRequest; import org.elasticsearch.client.indexlifecycle.GetLifecyclePolicyRequest; import org.elasticsearch.client.indexlifecycle.GetLifecyclePolicyResponse; import org.elasticsearch.client.indexlifecycle.LifecycleAction; +import org.elasticsearch.client.indexlifecycle.LifecycleManagementStatusRequest; +import org.elasticsearch.client.indexlifecycle.LifecycleManagementStatusResponse; import org.elasticsearch.client.indexlifecycle.LifecyclePolicy; +import org.elasticsearch.client.indexlifecycle.OperationMode; import org.elasticsearch.client.indexlifecycle.LifecyclePolicyMetadata; import org.elasticsearch.client.indexlifecycle.Phase; import org.elasticsearch.client.indexlifecycle.PutLifecyclePolicyRequest; +import org.elasticsearch.client.indexlifecycle.RetryLifecyclePolicyRequest; import org.elasticsearch.client.indexlifecycle.RolloverAction; +import org.elasticsearch.client.indexlifecycle.StartILMRequest; +import org.elasticsearch.client.indexlifecycle.StopILMRequest; import org.elasticsearch.client.indexlifecycle.ShrinkAction; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.hamcrest.Matchers; import java.io.IOException; import java.util.Collections; @@ -52,6 +63,8 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import 
static org.hamcrest.Matchers.equalTo; + public class ILMDocumentationIT extends ESRestHighLevelClientTestCase { public void testPutLifecyclePolicy() throws Exception { @@ -64,14 +77,14 @@ public void testPutLifecyclePolicy() throws Exception { new ByteSizeValue(50, ByteSizeUnit.GB), null, null)); phases.put("hot", new Phase("hot", TimeValue.ZERO, hotActions)); // <1> - Map deleteActions = + Map deleteActions = Collections.singletonMap(DeleteAction.NAME, new DeleteAction()); - phases.put("delete", new Phase("delete", + phases.put("delete", new Phase("delete", new TimeValue(90, TimeUnit.DAYS), deleteActions)); // <2> LifecyclePolicy policy = new LifecyclePolicy("my_policy", phases); // <3> - PutLifecyclePolicyRequest request = + PutLifecyclePolicyRequest request = new PutLifecyclePolicyRequest(policy); // end::ilm-put-lifecycle-policy-request @@ -88,10 +101,10 @@ public void testPutLifecyclePolicy() throws Exception { // Delete the policy so it can be added again { - DeleteLifecyclePolicyRequest deleteRequest = + DeleteLifecyclePolicyRequest deleteRequest = new DeleteLifecyclePolicyRequest("my_policy"); AcknowledgedResponse deleteResponse = client.indexLifecycle() - .deleteLifecyclePolicy(deleteRequest, + .deleteLifecyclePolicy(deleteRequest, RequestOptions.DEFAULT); assertTrue(deleteResponse.isAcknowledged()); } @@ -116,7 +129,7 @@ public void onFailure(Exception e) { listener = new LatchedActionListener<>(listener, latch); // tag::ilm-put-lifecycle-policy-execute-async - client.indexLifecycle().putLifecyclePolicyAsync(request, + client.indexLifecycle().putLifecyclePolicyAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::ilm-put-lifecycle-policy-execute-async @@ -124,6 +137,80 @@ public void onFailure(Exception e) { } + public void testDeletePolicy() throws IOException, InterruptedException { + RestHighLevelClient client = highLevelClient(); + + // Set up a policy so we have something to delete + PutLifecyclePolicyRequest putRequest; + { + Map phases = new HashMap<>(); + Map hotActions = new HashMap<>(); + hotActions.put(RolloverAction.NAME, new RolloverAction( + new ByteSizeValue(50, ByteSizeUnit.GB), null, null)); + phases.put("hot", new Phase("hot", TimeValue.ZERO, hotActions)); + Map deleteActions = + Collections.singletonMap(DeleteAction.NAME, + new DeleteAction()); + phases.put("delete", + new Phase("delete", + new TimeValue(90, TimeUnit.DAYS), deleteActions)); + LifecyclePolicy myPolicy = new LifecyclePolicy("my_policy", phases); + putRequest = new PutLifecyclePolicyRequest(myPolicy); + AcknowledgedResponse putResponse = client.indexLifecycle(). + putLifecyclePolicy(putRequest, RequestOptions.DEFAULT); + assertTrue(putResponse.isAcknowledged()); + } + + // tag::ilm-delete-lifecycle-policy-request + DeleteLifecyclePolicyRequest request = + new DeleteLifecyclePolicyRequest("my_policy"); // <1> + // end::ilm-delete-lifecycle-policy-request + + // tag::ilm-delete-lifecycle-policy-execute + AcknowledgedResponse response = client.indexLifecycle() + .deleteLifecyclePolicy(request, RequestOptions.DEFAULT); + // end::ilm-delete-lifecycle-policy-execute + + // tag::ilm-delete-lifecycle-policy-response + boolean acknowledged = response.isAcknowledged(); // <1> + // end::ilm-delete-lifecycle-policy-response + + assertTrue(acknowledged); + + // Put the policy again so we can delete it again + { + AcknowledgedResponse putResponse = client.indexLifecycle(). 
+ putLifecyclePolicy(putRequest, RequestOptions.DEFAULT); + assertTrue(putResponse.isAcknowledged()); + } + + // tag::ilm-delete-lifecycle-policy-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(AcknowledgedResponse response) { + boolean acknowledged = response.isAcknowledged(); // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::ilm-delete-lifecycle-policy-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::ilm-delete-lifecycle-policy-execute-async + client.indexLifecycle().deleteLifecyclePolicyAsync(request, + RequestOptions.DEFAULT, listener); // <1> + // end::ilm-delete-lifecycle-policy-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + public void testGetLifecyclePolicy() throws IOException, InterruptedException { RestHighLevelClient client = highLevelClient(); @@ -226,6 +313,223 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } + public void testStartStopStatus() throws Exception { + RestHighLevelClient client = highLevelClient(); + + stopILM(client); + + // tag::ilm-status-request + LifecycleManagementStatusRequest request = + new LifecycleManagementStatusRequest(); + // end::ilm-status-request + + // Check that ILM has stopped + { + // tag::ilm-status-execute + LifecycleManagementStatusResponse response = + client.indexLifecycle() + .lifecycleManagementStatus(request, RequestOptions.DEFAULT); + // end::ilm-status-execute + + // tag::ilm-status-response + OperationMode operationMode = response.getOperationMode(); // <1> + // end::ilm-status-response + + assertThat(operationMode, Matchers.either(equalTo(OperationMode.STOPPING)).or(equalTo(OperationMode.STOPPED))); + } + + startILM(client); + + // tag::ilm-status-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse( + LifecycleManagementStatusResponse response) { + OperationMode operationMode = response + .getOperationMode(); // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::ilm-status-execute-listener + + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::ilm-status-execute-async + client.indexLifecycle().lifecycleManagementStatusAsync(request, + RequestOptions.DEFAULT, listener); // <1> + // end::ilm-status-execute-async + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + + // Check that ILM is running again + LifecycleManagementStatusResponse response = + client.indexLifecycle() + .lifecycleManagementStatus(request, RequestOptions.DEFAULT); + + OperationMode operationMode = response.getOperationMode(); + assertEquals(OperationMode.RUNNING, operationMode); + } + + private void stopILM(RestHighLevelClient client) throws IOException, InterruptedException { + // tag::ilm-stop-ilm-request + StopILMRequest request = new StopILMRequest(); + // end::ilm-stop-ilm-request + + // tag::ilm-stop-ilm-execute + AcknowledgedResponse response = client.indexLifecycle() + .stopILM(request, RequestOptions.DEFAULT); + // end::ilm-stop-ilm-execute + + // tag::ilm-stop-ilm-response + boolean acknowledged = response.isAcknowledged(); // <1> + // end::ilm-stop-ilm-response + assertTrue(acknowledged); + + // tag::ilm-stop-ilm-execute-listener + ActionListener listener = + new 
ActionListener() { + @Override + public void onResponse(AcknowledgedResponse response) { + boolean acknowledged = response.isAcknowledged(); // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::ilm-stop-ilm-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::ilm-stop-ilm-execute-async + client.indexLifecycle().stopILMAsync(request, + RequestOptions.DEFAULT, listener); // <1> + // end::ilm-stop-ilm-execute-async + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + + private void startILM(RestHighLevelClient client) throws IOException, InterruptedException { + // tag::ilm-start-ilm-request + StartILMRequest request1 = new StartILMRequest(); + // end::ilm-start-ilm-request + + // tag::ilm-start-ilm-execute + AcknowledgedResponse response = client.indexLifecycle() + .startILM(request1, RequestOptions.DEFAULT); + // end::ilm-start-ilm-execute + + // tag::ilm-start-ilm-response + boolean acknowledged = response.isAcknowledged(); // <1> + // end::ilm-start-ilm-response + + assertTrue(acknowledged); + + // tag::ilm-start-ilm-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(AcknowledgedResponse response) { + boolean acknowledged = response.isAcknowledged(); // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::ilm-start-ilm-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::ilm-start-ilm-execute-async + client.indexLifecycle().startILMAsync(request1, + RequestOptions.DEFAULT, listener); // <1> + // end::ilm-start-ilm-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + + public void testRetryPolicy() throws Exception { + RestHighLevelClient client = highLevelClient(); + + // setup policy to immediately fail on index + { + Map phases = new HashMap<>(); + Map warmActions = new HashMap<>(); + warmActions.put(ShrinkAction.NAME, new ShrinkAction(1)); + phases.put("warm", new Phase("warm", TimeValue.ZERO, warmActions)); + + LifecyclePolicy policy = new LifecyclePolicy("my_policy", + phases); + PutLifecyclePolicyRequest putRequest = + new PutLifecyclePolicyRequest(policy); + client.indexLifecycle().putLifecyclePolicy(putRequest, RequestOptions.DEFAULT); + + CreateIndexRequest createIndexRequest = new CreateIndexRequest("my_index", + Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put("index.lifecycle.name", "my_policy") + .build()); + client.indices().create(createIndexRequest, RequestOptions.DEFAULT); + assertBusy(() -> assertNotNull(client.indexLifecycle() + .explainLifecycle(new ExplainLifecycleRequest().indices("my_index"), RequestOptions.DEFAULT) + .getIndexResponses().get("my_index").getFailedStep())); + } + + // tag::ilm-retry-lifecycle-policy-request + RetryLifecyclePolicyRequest request = + new RetryLifecyclePolicyRequest("my_index"); // <1> + // end::ilm-retry-lifecycle-policy-request + + + // tag::ilm-retry-lifecycle-policy-execute + AcknowledgedResponse response = client.indexLifecycle() + .retryLifecyclePolicy(request, RequestOptions.DEFAULT); + // end::ilm-retry-lifecycle-policy-execute + + // tag::ilm-retry-lifecycle-policy-response + boolean acknowledged = response.isAcknowledged(); // <1> + // 
end::ilm-retry-lifecycle-policy-response + + assertTrue(acknowledged); + + // tag::ilm-retry-lifecycle-policy-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(AcknowledgedResponse response) { + boolean acknowledged = response.isAcknowledged(); // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::ilm-retry-lifecycle-policy-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::ilm-retry-lifecycle-policy-execute-async + client.indexLifecycle().retryLifecyclePolicyAsync(request, + RequestOptions.DEFAULT, listener); // <1> + // end::ilm-retry-lifecycle-policy-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + static Map toMap(Response response) throws IOException { return XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java index 343acc16810f6..ee3a9eadebf55 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MlClientDocumentationIT.java @@ -35,6 +35,7 @@ import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.client.ml.CloseJobRequest; import org.elasticsearch.client.ml.CloseJobResponse; +import org.elasticsearch.client.ml.DeleteCalendarJobRequest; import org.elasticsearch.client.ml.DeleteCalendarRequest; import org.elasticsearch.client.ml.DeleteDatafeedRequest; import org.elasticsearch.client.ml.DeleteFilterRequest; @@ -48,6 +49,8 @@ import org.elasticsearch.client.ml.ForecastJobResponse; import org.elasticsearch.client.ml.GetBucketsRequest; import org.elasticsearch.client.ml.GetBucketsResponse; +import org.elasticsearch.client.ml.GetCalendarEventsRequest; +import org.elasticsearch.client.ml.GetCalendarEventsResponse; import org.elasticsearch.client.ml.GetCalendarsRequest; import org.elasticsearch.client.ml.GetCalendarsResponse; import org.elasticsearch.client.ml.GetCategoriesRequest; @@ -72,10 +75,13 @@ import org.elasticsearch.client.ml.GetRecordsResponse; import org.elasticsearch.client.ml.OpenJobRequest; import org.elasticsearch.client.ml.OpenJobResponse; +import org.elasticsearch.client.ml.PostCalendarEventRequest; +import org.elasticsearch.client.ml.PostCalendarEventResponse; import org.elasticsearch.client.ml.PostDataRequest; import org.elasticsearch.client.ml.PostDataResponse; import org.elasticsearch.client.ml.PreviewDatafeedRequest; import org.elasticsearch.client.ml.PreviewDatafeedResponse; +import org.elasticsearch.client.ml.PutCalendarJobRequest; import org.elasticsearch.client.ml.PutCalendarRequest; import org.elasticsearch.client.ml.PutCalendarResponse; import org.elasticsearch.client.ml.PutDatafeedRequest; @@ -84,6 +90,8 @@ import org.elasticsearch.client.ml.PutFilterResponse; import org.elasticsearch.client.ml.PutJobRequest; import org.elasticsearch.client.ml.PutJobResponse; +import org.elasticsearch.client.ml.RevertModelSnapshotRequest; +import org.elasticsearch.client.ml.RevertModelSnapshotResponse; import org.elasticsearch.client.ml.StartDatafeedRequest; import 
org.elasticsearch.client.ml.StartDatafeedResponse; import org.elasticsearch.client.ml.StopDatafeedRequest; @@ -91,7 +99,11 @@ import org.elasticsearch.client.ml.UpdateDatafeedRequest; import org.elasticsearch.client.ml.UpdateFilterRequest; import org.elasticsearch.client.ml.UpdateJobRequest; +import org.elasticsearch.client.ml.UpdateModelSnapshotRequest; +import org.elasticsearch.client.ml.UpdateModelSnapshotResponse; import org.elasticsearch.client.ml.calendars.Calendar; +import org.elasticsearch.client.ml.calendars.ScheduledEvent; +import org.elasticsearch.client.ml.calendars.ScheduledEventTests; import org.elasticsearch.client.ml.datafeed.ChunkingConfig; import org.elasticsearch.client.ml.datafeed.DatafeedConfig; import org.elasticsearch.client.ml.datafeed.DatafeedStats; @@ -2041,6 +2053,158 @@ public void onFailure(Exception e) { } } + public void testRevertModelSnapshot() throws IOException, InterruptedException { + RestHighLevelClient client = highLevelClient(); + + String jobId = "test-revert-model-snapshot"; + String snapshotId = "1541587919"; + Job job = MachineLearningIT.buildJob(jobId); + client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + + // Let us index a snapshot + String documentId = jobId + "_model_snapshot_" + snapshotId; + IndexRequest indexRequest = new IndexRequest(".ml-anomalies-shared", "doc", documentId); + indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + indexRequest.source("{\"job_id\":\"test-revert-model-snapshot\", \"timestamp\":1541587919000, " + + "\"description\":\"State persisted due to job close at 2018-11-07T10:51:59+0000\", " + + "\"snapshot_id\":\"1541587919\", \"snapshot_doc_count\":1, \"model_size_stats\":{" + + "\"job_id\":\"test-revert-model-snapshot\", \"result_type\":\"model_size_stats\",\"model_bytes\":51722, " + + "\"total_by_field_count\":3, \"total_over_field_count\":0, \"total_partition_field_count\":2," + + "\"bucket_allocation_failures_count\":0, \"memory_status\":\"ok\", \"log_time\":1541587919000, " + + "\"timestamp\":1519930800000}, \"latest_record_time_stamp\":1519931700000," + + "\"latest_result_time_stamp\":1519930800000, \"retain\":false, " + + "\"quantiles\":{\"job_id\":\"test-revert-model-snapshot\", \"timestamp\":1541587919000, " + + "\"quantile_state\":\"state\"}}", XContentType.JSON); + client.index(indexRequest, RequestOptions.DEFAULT); + + { + // tag::revert-model-snapshot-request + RevertModelSnapshotRequest request = new RevertModelSnapshotRequest(jobId, snapshotId); // <1> + // end::revert-model-snapshot-request + + // tag::revert-model-snapshot-delete-intervening-results + request.setDeleteInterveningResults(true); // <1> + // end::revert-model-snapshot-delete-intervening-results + + // tag::revert-model-snapshot-execute + RevertModelSnapshotResponse response = client.machineLearning().revertModelSnapshot(request, RequestOptions.DEFAULT); + // end::revert-model-snapshot-execute + + // tag::revert-model-snapshot-response + ModelSnapshot modelSnapshot = response.getModel(); // <1> + // end::revert-model-snapshot-response + + assertEquals(snapshotId, modelSnapshot.getSnapshotId()); + assertEquals("State persisted due to job close at 2018-11-07T10:51:59+0000", modelSnapshot.getDescription()); + assertEquals(51722, modelSnapshot.getModelSizeStats().getModelBytes()); + } + { + RevertModelSnapshotRequest request = new RevertModelSnapshotRequest(jobId, snapshotId); + + // tag::revert-model-snapshot-execute-listener + ActionListener listener = + new ActionListener() { + @Override 
+ public void onResponse(RevertModelSnapshotResponse revertModelSnapshotResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::revert-model-snapshot-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::revert-model-snapshot-execute-async + client.machineLearning().revertModelSnapshotAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::revert-model-snapshot-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + + + public void testUpdateModelSnapshot() throws IOException, InterruptedException { + RestHighLevelClient client = highLevelClient(); + + String jobId = "test-update-model-snapshot"; + String snapshotId = "1541587919"; + String documentId = jobId + "_model_snapshot_" + snapshotId; + Job job = MachineLearningIT.buildJob(jobId); + client.machineLearning().putJob(new PutJobRequest(job), RequestOptions.DEFAULT); + + // Let us index a snapshot + IndexRequest indexRequest = new IndexRequest(".ml-anomalies-shared", "doc", documentId); + indexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + indexRequest.source("{\"job_id\":\"test-update-model-snapshot\", \"timestamp\":1541587919000, " + + "\"description\":\"State persisted due to job close at 2018-11-07T10:51:59+0000\", " + + "\"snapshot_id\":\"1541587919\", \"snapshot_doc_count\":1, \"model_size_stats\":{" + + "\"job_id\":\"test-update-model-snapshot\", \"result_type\":\"model_size_stats\",\"model_bytes\":51722, " + + "\"total_by_field_count\":3, \"total_over_field_count\":0, \"total_partition_field_count\":2," + + "\"bucket_allocation_failures_count\":0, \"memory_status\":\"ok\", \"log_time\":1541587919000, " + + "\"timestamp\":1519930800000}, \"latest_record_time_stamp\":1519931700000," + + "\"latest_result_time_stamp\":1519930800000, \"retain\":false}", XContentType.JSON); + client.index(indexRequest, RequestOptions.DEFAULT); + + { + // tag::update-model-snapshot-request + UpdateModelSnapshotRequest request = new UpdateModelSnapshotRequest(jobId, snapshotId); // <1> + // end::update-model-snapshot-request + + // tag::update-model-snapshot-description + request.setDescription("My Snapshot"); // <1> + // end::update-model-snapshot-description + + // tag::update-model-snapshot-retain + request.setRetain(true); // <1> + // end::update-model-snapshot-retain + + // tag::update-model-snapshot-execute + UpdateModelSnapshotResponse response = client.machineLearning().updateModelSnapshot(request, RequestOptions.DEFAULT); + // end::update-model-snapshot-execute + + // tag::update-model-snapshot-response + boolean acknowledged = response.getAcknowledged(); // <1> + ModelSnapshot modelSnapshot = response.getModel(); // <2> + // end::update-model-snapshot-response + + assertTrue(acknowledged); + assertEquals("My Snapshot", modelSnapshot.getDescription()); } + { + UpdateModelSnapshotRequest request = new UpdateModelSnapshotRequest(jobId, snapshotId); + + // tag::update-model-snapshot-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(UpdateModelSnapshotResponse updateModelSnapshotResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::update-model-snapshot-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + 
listener = new LatchedActionListener<>(listener, latch); + + // tag::update-model-snapshot-execute-async + client.machineLearning().updateModelSnapshotAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::update-model-snapshot-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + public void testPutCalendar() throws IOException, InterruptedException { RestHighLevelClient client = highLevelClient(); @@ -2083,6 +2247,112 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } + public void testPutCalendarJob() throws IOException, InterruptedException { + RestHighLevelClient client = highLevelClient(); + + Calendar calendar = new Calendar("holidays", Collections.singletonList("job_1"), "A calendar for public holidays"); + PutCalendarRequest putRequest = new PutCalendarRequest(calendar); + client.machineLearning().putCalendar(putRequest, RequestOptions.DEFAULT); + { + // tag::put-calendar-job-request + PutCalendarJobRequest request = new PutCalendarJobRequest("holidays", // <1> + "job_2", "job_group_1"); // <2> + // end::put-calendar-job-request + + // tag::put-calendar-job-execute + PutCalendarResponse response = client.machineLearning().putCalendarJob(request, RequestOptions.DEFAULT); + // end::put-calendar-job-execute + + // tag::put-calendar-job-response + Calendar updatedCalendar = response.getCalendar(); // <1> + // end::put-calendar-job-response + + assertThat(updatedCalendar.getJobIds(), containsInAnyOrder("job_1", "job_2", "job_group_1")); + } + { + PutCalendarJobRequest request = new PutCalendarJobRequest("holidays", "job_4"); + + // tag::put-calendar-job-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(PutCalendarResponse putCalendarsResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::put-calendar-job-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::put-calendar-job-execute-async + client.machineLearning().putCalendarJobAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::put-calendar-job-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + + public void testDeleteCalendarJob() throws IOException, InterruptedException { + RestHighLevelClient client = highLevelClient(); + + Calendar calendar = new Calendar("holidays", + Arrays.asList("job_1", "job_group_1", "job_2"), + "A calendar for public holidays"); + PutCalendarRequest putRequest = new PutCalendarRequest(calendar); + client.machineLearning().putCalendar(putRequest, RequestOptions.DEFAULT); + { + // tag::delete-calendar-job-request + DeleteCalendarJobRequest request = new DeleteCalendarJobRequest("holidays", // <1> + "job_1", "job_group_1"); // <2> + // end::delete-calendar-job-request + + // tag::delete-calendar-job-execute + PutCalendarResponse response = client.machineLearning().deleteCalendarJob(request, RequestOptions.DEFAULT); + // end::delete-calendar-job-execute + + // tag::delete-calendar-job-response + Calendar updatedCalendar = response.getCalendar(); // <1> + // end::delete-calendar-job-response + + assertThat(updatedCalendar.getJobIds(), containsInAnyOrder("job_2")); + } + { + DeleteCalendarJobRequest request = new DeleteCalendarJobRequest("holidays", "job_2"); + + // tag::delete-calendar-job-execute-listener + ActionListener listener = + new 
ActionListener() { + @Override + public void onResponse(PutCalendarResponse deleteCalendarsResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::delete-calendar-job-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::delete-calendar-job-execute-async + client.machineLearning().deleteCalendarJobAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::delete-calendar-job-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + public void testGetCalendar() throws IOException, InterruptedException { RestHighLevelClient client = highLevelClient(); @@ -2191,6 +2461,136 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } + public void testGetCalendarEvent() throws IOException, InterruptedException { + RestHighLevelClient client = highLevelClient(); + + Calendar calendar = new Calendar("holidays", Collections.singletonList("job_1"), "A calendar for public holidays"); + PutCalendarRequest putRequest = new PutCalendarRequest(calendar); + client.machineLearning().putCalendar(putRequest, RequestOptions.DEFAULT); + List events = Collections.singletonList(ScheduledEventTests.testInstance(calendar.getId(), null)); + client.machineLearning().postCalendarEvent(new PostCalendarEventRequest("holidays", events), RequestOptions.DEFAULT); + { + // tag::get-calendar-events-request + GetCalendarEventsRequest request = new GetCalendarEventsRequest("holidays"); // <1> + // end::get-calendar-events-request + + // tag::get-calendar-events-page + request.setPageParams(new PageParams(10, 20)); // <1> + // end::get-calendar-events-page + + // tag::get-calendar-events-start + request.setStart("2018-08-01T00:00:00Z"); // <1> + // end::get-calendar-events-start + + // tag::get-calendar-events-end + request.setEnd("2018-08-02T00:00:00Z"); // <1> + // end::get-calendar-events-end + + // tag::get-calendar-events-jobid + request.setJobId("job_1"); // <1> + // end::get-calendar-events-jobid + + // reset params + request.setPageParams(null); + request.setJobId(null); + request.setStart(null); + request.setEnd(null); + + // tag::get-calendar-events-execute + GetCalendarEventsResponse response = client.machineLearning().getCalendarEvents(request, RequestOptions.DEFAULT); + // end::get-calendar-events-execute + + // tag::get-calendar-events-response + long count = response.count(); // <1> + List scheduledEvents = response.events(); // <2> + // end::get-calendar-events-response + assertEquals(1, scheduledEvents.size()); + } + { + GetCalendarEventsRequest request = new GetCalendarEventsRequest("holidays"); + + // tag::get-calendar-events-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(GetCalendarEventsResponse getCalendarsResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::get-calendar-events-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::get-calendar-events-execute-async + client.machineLearning().getCalendarEventsAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::get-calendar-events-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + + public void 
testPostCalendarEvent() throws IOException, InterruptedException { + RestHighLevelClient client = highLevelClient(); + + Calendar calendar = new Calendar("holidays", Collections.singletonList("job_1"), "A calendar for public holidays"); + PutCalendarRequest putRequest = new PutCalendarRequest(calendar); + client.machineLearning().putCalendar(putRequest, RequestOptions.DEFAULT); + { + List events = Collections.singletonList(ScheduledEventTests.testInstance(calendar.getId(), null)); + + // tag::post-calendar-event-request + PostCalendarEventRequest request = new PostCalendarEventRequest("holidays", // <1> + events); // <2> + // end::post-calendar-event-request + + // tag::post-calendar-event-execute + PostCalendarEventResponse response = client.machineLearning().postCalendarEvent(request, RequestOptions.DEFAULT); + // end::post-calendar-event-execute + + // tag::post-calendar-event-response + List scheduledEvents = response.getScheduledEvents(); // <1> + // end::post-calendar-event-response + + assertEquals(1, scheduledEvents.size()); + } + { + List events = Collections.singletonList(ScheduledEventTests.testInstance()); + PostCalendarEventRequest request = new PostCalendarEventRequest("holidays", events); // <1> + + // tag::post-calendar-event-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(PostCalendarEventResponse postCalendarsResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::post-calendar-event-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::post-calendar-event-execute-async + client.machineLearning().postCalendarEventAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::post-calendar-event-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + public void testCreateFilter() throws Exception { RestHighLevelClient client = highLevelClient(); { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java index 71cfdd4ba5b89..39f57706a3667 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java @@ -51,6 +51,8 @@ import org.elasticsearch.client.security.GetRoleMappingsRequest; import org.elasticsearch.client.security.GetRoleMappingsResponse; import org.elasticsearch.client.security.GetSslCertificatesResponse; +import org.elasticsearch.client.security.HasPrivilegesRequest; +import org.elasticsearch.client.security.HasPrivilegesResponse; import org.elasticsearch.client.security.InvalidateTokenRequest; import org.elasticsearch.client.security.InvalidateTokenResponse; import org.elasticsearch.client.security.PutRoleMappingRequest; @@ -63,7 +65,9 @@ import org.elasticsearch.client.security.support.expressiondsl.expressions.AnyRoleMapperExpression; import org.elasticsearch.client.security.support.expressiondsl.fields.FieldRoleMapperExpression; import org.elasticsearch.client.security.user.User; +import org.elasticsearch.client.security.user.privileges.IndicesPrivileges; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.util.set.Sets; import 
org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.RestStatus; import org.hamcrest.Matchers; @@ -80,6 +84,7 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.emptyIterable; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.isIn; @@ -437,6 +442,67 @@ public void onFailure(Exception e) { } } + public void testHasPrivileges() throws Exception { + RestHighLevelClient client = highLevelClient(); + { + //tag::has-privileges-request + HasPrivilegesRequest request = new HasPrivilegesRequest( + Sets.newHashSet("monitor", "manage"), + Sets.newHashSet( + IndicesPrivileges.builder().indices("logstash-2018-10-05").privileges("read", "write").build(), + IndicesPrivileges.builder().indices("logstash-2018-*").privileges("read").build() + ), + null + ); + //end::has-privileges-request + + //tag::has-privileges-execute + HasPrivilegesResponse response = client.security().hasPrivileges(request, RequestOptions.DEFAULT); + //end::has-privileges-execute + + //tag::has-privileges-response + boolean hasMonitor = response.hasClusterPrivilege("monitor"); // <1> + boolean hasWrite = response.hasIndexPrivilege("logstash-2018-10-05", "write"); // <2> + boolean hasRead = response.hasIndexPrivilege("logstash-2018-*", "read"); // <3> + //end::has-privileges-response + + assertThat(response.getUsername(), is("test_user")); + assertThat(response.hasAllRequested(), is(true)); + assertThat(hasMonitor, is(true)); + assertThat(hasWrite, is(true)); + assertThat(hasRead, is(true)); + assertThat(response.getApplicationPrivileges().entrySet(), emptyIterable()); + } + + { + HasPrivilegesRequest request = new HasPrivilegesRequest(Collections.singleton("monitor"),null,null); + + // tag::has-privileges-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(HasPrivilegesResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::has-privileges-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::has-privileges-execute-async + client.security().hasPrivilegesAsync(request, RequestOptions.DEFAULT, listener); // <1> + // end::has-privileges-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + public void testClearRealmCache() throws Exception { RestHighLevelClient client = highLevelClient(); { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/DeleteCalendarJobRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/DeleteCalendarJobRequestTests.java new file mode 100644 index 0000000000000..63d78b45986b5 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/DeleteCalendarJobRequestTests.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.ml; + +import org.elasticsearch.test.ESTestCase; + +public class DeleteCalendarJobRequestTests extends ESTestCase { + + public void testWithNullId() { + NullPointerException ex = expectThrows(NullPointerException.class, + () -> new DeleteCalendarJobRequest(null, "job1")); + assertEquals("[calendar_id] must not be null.", ex.getMessage()); + } + + public void testSetJobIds() { + String calendarId = randomAlphaOfLength(10); + + NullPointerException ex = expectThrows(NullPointerException.class, + () ->new DeleteCalendarJobRequest(calendarId, "job1", null)); + assertEquals("jobIds must not contain null values.", ex.getMessage()); + + IllegalArgumentException illegalArgumentException = + expectThrows(IllegalArgumentException.class, () -> new DeleteCalendarJobRequest(calendarId)); + assertEquals("jobIds must not be empty.", illegalArgumentException.getMessage()); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCalendarEventsRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCalendarEventsRequestTests.java new file mode 100644 index 0000000000000..a85eda1ac74aa --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCalendarEventsRequestTests.java @@ -0,0 +1,55 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.job.util.PageParams; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +public class GetCalendarEventsRequestTests extends AbstractXContentTestCase { + + @Override + protected GetCalendarEventsRequest createTestInstance() { + String calendarId = randomAlphaOfLengthBetween(1, 10); + GetCalendarEventsRequest request = new GetCalendarEventsRequest(calendarId); + if (randomBoolean()) { + request.setPageParams(new PageParams(1, 2)); + } + if (randomBoolean()) { + request.setEnd(randomAlphaOfLength(10)); + } + if (randomBoolean()) { + request.setStart(randomAlphaOfLength(10)); + } + if (randomBoolean()) { + request.setJobId(randomAlphaOfLength(10)); + } + return request; + } + + @Override + protected GetCalendarEventsRequest doParseInstance(XContentParser parser) { + return GetCalendarEventsRequest.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCalendarEventsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCalendarEventsResponseTests.java new file mode 100644 index 0000000000000..c72fe67906e74 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/GetCalendarEventsResponseTests.java @@ -0,0 +1,53 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.calendars.ScheduledEvent; +import org.elasticsearch.client.ml.calendars.ScheduledEventTests; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class GetCalendarEventsResponseTests extends AbstractXContentTestCase { + + @Override + protected GetCalendarEventsResponse createTestInstance() { + String calendarId = randomAlphaOfLength(10); + List scheduledEvents = new ArrayList<>(); + int count = randomIntBetween(0, 3); + for (int i=0; i { + + @Override + protected PostCalendarEventRequest createTestInstance() { + String calendarId = randomAlphaOfLength(10); + int numberOfEvents = randomIntBetween(1, 10); + List events = new ArrayList<>(numberOfEvents); + for (int i = 0; i < numberOfEvents; i++) { + events.add(ScheduledEventTests.testInstance()); + } + return new PostCalendarEventRequest(calendarId, events); + } + + @Override + protected PostCalendarEventRequest doParseInstance(XContentParser parser) throws IOException { + return PostCalendarEventRequest.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PostCalendarEventResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PostCalendarEventResponseTests.java new file mode 100644 index 0000000000000..8f8be0981c864 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PostCalendarEventResponseTests.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.calendars.ScheduledEvent; +import org.elasticsearch.client.ml.calendars.ScheduledEventTests; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class PostCalendarEventResponseTests extends AbstractXContentTestCase { + @Override + protected PostCalendarEventResponse createTestInstance() { + int numberOfEvents = randomIntBetween(1, 10); + List events = new ArrayList<>(numberOfEvents); + for (int i = 0; i < numberOfEvents; i++) { + events.add(ScheduledEventTests.testInstance()); + } + return new PostCalendarEventResponse(events); + } + + @Override + protected PostCalendarEventResponse doParseInstance(XContentParser parser) throws IOException { + return PostCalendarEventResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutCalendarJobRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutCalendarJobRequestTests.java new file mode 100644 index 0000000000000..604146f74cfe5 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/PutCalendarJobRequestTests.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.ml; + +import org.elasticsearch.test.ESTestCase; + +public class PutCalendarJobRequestTests extends ESTestCase { + + public void testWithNullId() { + NullPointerException ex = expectThrows(NullPointerException.class, + () -> new PutCalendarJobRequest(null, "job1")); + assertEquals("[calendar_id] must not be null.", ex.getMessage()); + } + + public void testSetJobIds() { + String calendarId = randomAlphaOfLength(10); + + NullPointerException ex = expectThrows(NullPointerException.class, + () ->new PutCalendarJobRequest(calendarId, "job1", null)); + assertEquals("jobIds must not contain null values.", ex.getMessage()); + + IllegalArgumentException illegalArgumentException = + expectThrows(IllegalArgumentException.class, () -> new PutCalendarJobRequest(calendarId)); + assertEquals("jobIds must not be empty.", illegalArgumentException.getMessage()); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/RevertModelSnapshotRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/RevertModelSnapshotRequestTests.java new file mode 100644 index 0000000000000..5a9581df8f700 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/RevertModelSnapshotRequestTests.java @@ -0,0 +1,50 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + + +public class RevertModelSnapshotRequestTests extends AbstractXContentTestCase { + + @Override + protected RevertModelSnapshotRequest createTestInstance() { + String jobId = randomAlphaOfLengthBetween(1, 20); + String snapshotId = randomAlphaOfLengthBetween(1, 20); + RevertModelSnapshotRequest request = new RevertModelSnapshotRequest(jobId, snapshotId); + if (randomBoolean()) { + request.setDeleteInterveningResults(randomBoolean()); + } + + return request; + } + + @Override + protected RevertModelSnapshotRequest doParseInstance(XContentParser parser) throws IOException { + return RevertModelSnapshotRequest.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/RevertModelSnapshotResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/RevertModelSnapshotResponseTests.java new file mode 100644 index 0000000000000..9d46c51566b7e --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/RevertModelSnapshotResponseTests.java @@ -0,0 +1,46 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.job.process.ModelSnapshot; +import org.elasticsearch.client.ml.job.process.ModelSnapshotTests; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + + +public class RevertModelSnapshotResponseTests extends AbstractXContentTestCase { + + @Override + protected RevertModelSnapshotResponse createTestInstance() { + ModelSnapshot.Builder modelBuilder = ModelSnapshotTests.createRandomizedBuilder(); + return new RevertModelSnapshotResponse(modelBuilder); + } + + @Override + protected RevertModelSnapshotResponse doParseInstance(XContentParser parser) throws IOException { + return RevertModelSnapshotResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/UpdateModelSnapshotRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/UpdateModelSnapshotRequestTests.java new file mode 100644 index 0000000000000..659265fcfeac4 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/UpdateModelSnapshotRequestTests.java @@ -0,0 +1,53 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + + +public class UpdateModelSnapshotRequestTests extends AbstractXContentTestCase { + + @Override + protected UpdateModelSnapshotRequest createTestInstance() { + String jobId = randomAlphaOfLengthBetween(1, 20); + String snapshotId = randomAlphaOfLengthBetween(1, 20); + UpdateModelSnapshotRequest request = new UpdateModelSnapshotRequest(jobId, snapshotId); + if (randomBoolean()) { + request.setDescription(String.valueOf(randomNonNegativeLong())); + } + if (randomBoolean()) { + request.setRetain(randomBoolean()); + } + + return request; + } + + @Override + protected UpdateModelSnapshotRequest doParseInstance(XContentParser parser) throws IOException { + return UpdateModelSnapshotRequest.PARSER.apply(parser, null); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/UpdateModelSnapshotResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/UpdateModelSnapshotResponseTests.java new file mode 100644 index 0000000000000..b03c07d880435 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/UpdateModelSnapshotResponseTests.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.ml; + +import org.elasticsearch.client.ml.job.process.ModelSnapshot; +import org.elasticsearch.client.ml.job.process.ModelSnapshotTests; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + + +public class UpdateModelSnapshotResponseTests extends AbstractXContentTestCase { + + @Override + protected UpdateModelSnapshotResponse createTestInstance() { + Boolean acknowledged = randomBoolean(); + ModelSnapshot.Builder modelBuilder = ModelSnapshotTests.createRandomizedBuilder(); + return new UpdateModelSnapshotResponse(acknowledged, modelBuilder); + } + + @Override + protected UpdateModelSnapshotResponse doParseInstance(XContentParser parser) throws IOException { + return UpdateModelSnapshotResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/calendars/ScheduledEventTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/calendars/ScheduledEventTests.java index 0b7a293340245..77380c2bd35e6 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/calendars/ScheduledEventTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/calendars/ScheduledEventTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.client.ml.calendars; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractXContentTestCase; @@ -26,12 +27,16 @@ public class ScheduledEventTests extends AbstractXContentTestCase { - public static ScheduledEvent testInstance() { + public static ScheduledEvent testInstance(String calendarId, @Nullable String eventId) { Date start = new Date(randomNonNegativeLong()); Date end = new Date(start.getTime() + randomIntBetween(1, 10000) * 1000); - return new ScheduledEvent(randomAlphaOfLength(10), start, end, randomAlphaOfLengthBetween(1, 20), - randomBoolean() ? null : randomAlphaOfLength(7)); + return new ScheduledEvent(randomAlphaOfLength(10), start, end, calendarId, eventId); + } + + public static ScheduledEvent testInstance() { + return testInstance(randomAlphaOfLengthBetween(1, 20), + randomBoolean() ? 
null : randomAlphaOfLength(7)); } @Override diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/AnalysisConfigTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/AnalysisConfigTests.java index 7b76688f4d31b..3914697b768ba 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/AnalysisConfigTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ml/job/config/AnalysisConfigTests.java @@ -89,12 +89,6 @@ public static AnalysisConfig.Builder createRandomized() { if (randomBoolean()) { builder.setMultivariateByFields(randomBoolean()); } - if (randomBoolean()) { - builder.setOverlappingBuckets(randomBoolean()); - } - if (randomBoolean()) { - builder.setResultFinalizationWindow(randomNonNegativeLong()); - } builder.setInfluencers(Arrays.asList(generateRandomStringArray(10, 10, false))); return builder; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/HasPrivilegesRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/HasPrivilegesRequestTests.java new file mode 100644 index 0000000000000..5a888bd95e4ab --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/HasPrivilegesRequestTests.java @@ -0,0 +1,111 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.client.security; + +import org.elasticsearch.client.security.user.privileges.ApplicationResourcePrivileges; +import org.elasticsearch.client.security.user.privileges.IndicesPrivileges; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; +import org.elasticsearch.test.XContentTestUtils; +import org.hamcrest.Matchers; + +import java.io.IOException; +import java.util.Arrays; +import java.util.LinkedHashSet; +import java.util.Map; +import java.util.Set; + +public class HasPrivilegesRequestTests extends ESTestCase { + + public void testToXContent() throws IOException { + final HasPrivilegesRequest request = new HasPrivilegesRequest( + new LinkedHashSet<>(Arrays.asList("monitor", "manage_watcher", "manage_ml")), + new LinkedHashSet<>(Arrays.asList( + IndicesPrivileges.builder().indices("index-001", "index-002").privileges("all").build(), + IndicesPrivileges.builder().indices("index-003").privileges("read").build() + )), + new LinkedHashSet<>(Arrays.asList( + new ApplicationResourcePrivileges("myapp", Arrays.asList("read", "write"), Arrays.asList("*")), + new ApplicationResourcePrivileges("myapp", Arrays.asList("admin"), Arrays.asList("/data/*")) + )) + ); + String json = Strings.toString(request); + final Map parsed = XContentHelper.convertToMap(XContentType.JSON.xContent(), json, false); + + final Map expected = XContentHelper.convertToMap(XContentType.JSON.xContent(), "{" + + " \"cluster\":[\"monitor\",\"manage_watcher\",\"manage_ml\"]," + + " \"index\":[{" + + " \"names\":[\"index-001\",\"index-002\"]," + + " \"privileges\":[\"all\"]" + + " },{" + + " \"names\":[\"index-003\"]," + + " \"privileges\":[\"read\"]" + + " }]," + + " \"application\":[{" + + " \"application\":\"myapp\"," + + " \"privileges\":[\"read\",\"write\"]," + + " \"resources\":[\"*\"]" + + " },{" + + " \"application\":\"myapp\"," + + " \"privileges\":[\"admin\"]," + + " \"resources\":[\"/data/*\"]" + + " }]" + + "}", false); + + assertThat(XContentTestUtils.differenceBetweenMapsIgnoringArrayOrder(parsed, expected), Matchers.nullValue()); + } + + public void testEqualsAndHashCode() { + final Set cluster = Sets.newHashSet(randomArray(1, 3, String[]::new, () -> randomAlphaOfLengthBetween(3, 8))); + final Set indices = Sets.newHashSet(randomArray(1, 5, IndicesPrivileges[]::new, + () -> IndicesPrivileges.builder() + .indices(generateRandomStringArray(5, 12, false, false)) + .privileges(generateRandomStringArray(3, 8, false, false)) + .build())); + final Set application = Sets.newHashSet(randomArray(1, 5, ApplicationResourcePrivileges[]::new, + () -> new ApplicationResourcePrivileges( + randomAlphaOfLengthBetween(5, 12), + Sets.newHashSet(generateRandomStringArray(3, 8, false, false)), + Sets.newHashSet(generateRandomStringArray(2, 6, false, false)) + ))); + final HasPrivilegesRequest request = new HasPrivilegesRequest(cluster, indices, application); + EqualsHashCodeTestUtils.checkEqualsAndHashCode(request, this::copy, this::mutate); + } + + private HasPrivilegesRequest copy(HasPrivilegesRequest request) { + return new HasPrivilegesRequest(request.getClusterPrivileges(), request.getIndexPrivileges(), request.getApplicationPrivileges()); + } + + private HasPrivilegesRequest mutate(HasPrivilegesRequest request) { + switch (randomIntBetween(1, 3)) { + case 
1: + return new HasPrivilegesRequest(null, request.getIndexPrivileges(), request.getApplicationPrivileges()); + case 2: + return new HasPrivilegesRequest(request.getClusterPrivileges(), null, request.getApplicationPrivileges()); + case 3: + return new HasPrivilegesRequest(request.getClusterPrivileges(), request.getIndexPrivileges(), null); + } + throw new IllegalStateException("The universe is broken (or the RNG is)"); + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/HasPrivilegesResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/HasPrivilegesResponseTests.java new file mode 100644 index 0000000000000..2fb542f4314d7 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/HasPrivilegesResponseTests.java @@ -0,0 +1,262 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client.security; + +import org.elasticsearch.common.collect.MapBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; +import org.hamcrest.Matchers; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; + +import static java.util.Collections.emptyMap; + +public class HasPrivilegesResponseTests extends ESTestCase { + + public void testParseValidResponse() throws IOException { + String json = "{" + + " \"username\": \"namor\"," + + " \"has_all_requested\": false," + + " \"cluster\" : {" + + " \"manage\" : false," + + " \"monitor\" : true" + + " }," + + " \"index\" : {" + + " \"index-01\": {" + + " \"read\" : true," + + " \"write\" : false" + + " }," + + " \"index-02\": {" + + " \"read\" : true," + + " \"write\" : true" + + " }," + + " \"index-03\": {" + + " \"read\" : false," + + " \"write\" : false" + + " }" + + " }," + + " \"application\" : {" + + " \"app01\" : {" + + " \"/object/1\" : {" + + " \"read\" : true," + + " \"write\" : false" + + " }," + + " \"/object/2\" : {" + + " \"read\" : true," + + " \"write\" : true" + + " }" + + " }," + + " \"app02\" : {" + + " \"/object/1\" : {" + + " \"read\" : false," + + " \"write\" : false" + + " }," + + " \"/object/3\" : {" + + " \"read\" : false," + + " \"write\" : true" + + " }" + + " }" + + " }" + + "}"; + final XContentParser parser = createParser(XContentType.JSON.xContent(), json); + HasPrivilegesResponse response = HasPrivilegesResponse.fromXContent(parser); + + assertThat(response.getUsername(), Matchers.equalTo("namor")); + assertThat(response.hasAllRequested(), Matchers.equalTo(false)); + + assertThat(response.getClusterPrivileges().keySet(), 
Matchers.containsInAnyOrder("monitor", "manage")); + assertThat(response.hasClusterPrivilege("monitor"), Matchers.equalTo(true)); + assertThat(response.hasClusterPrivilege("manage"), Matchers.equalTo(false)); + + assertThat(response.getIndexPrivileges().keySet(), Matchers.containsInAnyOrder("index-01", "index-02", "index-03")); + assertThat(response.hasIndexPrivilege("index-01", "read"), Matchers.equalTo(true)); + assertThat(response.hasIndexPrivilege("index-01", "write"), Matchers.equalTo(false)); + assertThat(response.hasIndexPrivilege("index-02", "read"), Matchers.equalTo(true)); + assertThat(response.hasIndexPrivilege("index-02", "write"), Matchers.equalTo(true)); + assertThat(response.hasIndexPrivilege("index-03", "read"), Matchers.equalTo(false)); + assertThat(response.hasIndexPrivilege("index-03", "write"), Matchers.equalTo(false)); + + assertThat(response.getApplicationPrivileges().keySet(), Matchers.containsInAnyOrder("app01", "app02")); + assertThat(response.hasApplicationPrivilege("app01", "/object/1", "read"), Matchers.equalTo(true)); + assertThat(response.hasApplicationPrivilege("app01", "/object/1", "write"), Matchers.equalTo(false)); + assertThat(response.hasApplicationPrivilege("app01", "/object/2", "read"), Matchers.equalTo(true)); + assertThat(response.hasApplicationPrivilege("app01", "/object/2", "write"), Matchers.equalTo(true)); + assertThat(response.hasApplicationPrivilege("app02", "/object/1", "read"), Matchers.equalTo(false)); + assertThat(response.hasApplicationPrivilege("app02", "/object/1", "write"), Matchers.equalTo(false)); + assertThat(response.hasApplicationPrivilege("app02", "/object/3", "read"), Matchers.equalTo(false)); + assertThat(response.hasApplicationPrivilege("app02", "/object/3", "write"), Matchers.equalTo(true)); + } + + public void testHasClusterPrivilege() { + final Map cluster = MapBuilder.newMapBuilder() + .put("a", true) + .put("b", false) + .put("c", false) + .put("d", true) + .map(); + final HasPrivilegesResponse response = new HasPrivilegesResponse("x", false, cluster, emptyMap(), emptyMap()); + assertThat(response.hasClusterPrivilege("a"), Matchers.is(true)); + assertThat(response.hasClusterPrivilege("b"), Matchers.is(false)); + assertThat(response.hasClusterPrivilege("c"), Matchers.is(false)); + assertThat(response.hasClusterPrivilege("d"), Matchers.is(true)); + + final IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> response.hasClusterPrivilege("e")); + assertThat(iae.getMessage(), Matchers.containsString("[e]")); + assertThat(iae.getMessage().toLowerCase(Locale.ROOT), Matchers.containsString("cluster privilege")); + } + + public void testHasIndexPrivilege() { + final Map> index = MapBuilder.>newMapBuilder() + .put("i1", Collections.singletonMap("read", true)) + .put("i2", Collections.singletonMap("read", false)) + .put("i3", MapBuilder.newMapBuilder().put("read", true).put("write", true).map()) + .put("i4", MapBuilder.newMapBuilder().put("read", true).put("write", false).map()) + .put("i*", MapBuilder.newMapBuilder().put("read", false).put("write", false).map()) + .map(); + final HasPrivilegesResponse response = new HasPrivilegesResponse("x", false, emptyMap(), index, emptyMap()); + assertThat(response.hasIndexPrivilege("i1", "read"), Matchers.is(true)); + assertThat(response.hasIndexPrivilege("i2", "read"), Matchers.is(false)); + assertThat(response.hasIndexPrivilege("i3", "read"), Matchers.is(true)); + assertThat(response.hasIndexPrivilege("i3", "write"), Matchers.is(true)); + 
assertThat(response.hasIndexPrivilege("i4", "read"), Matchers.is(true)); + assertThat(response.hasIndexPrivilege("i4", "write"), Matchers.is(false)); + assertThat(response.hasIndexPrivilege("i*", "read"), Matchers.is(false)); + assertThat(response.hasIndexPrivilege("i*", "write"), Matchers.is(false)); + + final IllegalArgumentException iae1 = expectThrows(IllegalArgumentException.class, () -> response.hasIndexPrivilege("i0", "read")); + assertThat(iae1.getMessage(), Matchers.containsString("index [i0]")); + + final IllegalArgumentException iae2 = expectThrows(IllegalArgumentException.class, () -> response.hasIndexPrivilege("i1", "write")); + assertThat(iae2.getMessage().toLowerCase(Locale.ROOT), Matchers.containsString("privilege [write]")); + assertThat(iae2.getMessage(), Matchers.containsString("index [i1]")); + } + + public void testHasApplicationPrivilege() { + final Map> app1 = MapBuilder.>newMapBuilder() + .put("/data/1", Collections.singletonMap("read", true)) + .put("/data/2", Collections.singletonMap("read", false)) + .put("/data/3", MapBuilder.newMapBuilder().put("read", true).put("write", true).map()) + .put("/data/4", MapBuilder.newMapBuilder().put("read", true).put("write", false).map()) + .map(); + final Map> app2 = MapBuilder.>newMapBuilder() + .put("/action/1", Collections.singletonMap("execute", true)) + .put("/action/*", Collections.singletonMap("execute", false)) + .map(); + Map>> appPrivileges = new HashMap<>(); + appPrivileges.put("a1", app1); + appPrivileges.put("a2", app2); + final HasPrivilegesResponse response = new HasPrivilegesResponse("x", false, emptyMap(), emptyMap(), appPrivileges); + assertThat(response.hasApplicationPrivilege("a1", "/data/1", "read"), Matchers.is(true)); + assertThat(response.hasApplicationPrivilege("a1", "/data/2", "read"), Matchers.is(false)); + assertThat(response.hasApplicationPrivilege("a1", "/data/3", "read"), Matchers.is(true)); + assertThat(response.hasApplicationPrivilege("a1", "/data/3", "write"), Matchers.is(true)); + assertThat(response.hasApplicationPrivilege("a1", "/data/4", "read"), Matchers.is(true)); + assertThat(response.hasApplicationPrivilege("a1", "/data/4", "write"), Matchers.is(false)); + assertThat(response.hasApplicationPrivilege("a2", "/action/1", "execute"), Matchers.is(true)); + assertThat(response.hasApplicationPrivilege("a2", "/action/*", "execute"), Matchers.is(false)); + + final IllegalArgumentException iae1 = expectThrows(IllegalArgumentException.class, + () -> response.hasApplicationPrivilege("a0", "/data/1", "read")); + assertThat(iae1.getMessage().toLowerCase(Locale.ROOT), Matchers.containsString("application [a0]")); + + final IllegalArgumentException iae2 = expectThrows(IllegalArgumentException.class, + () -> response.hasApplicationPrivilege("a1", "/data/0", "read")); + assertThat(iae2.getMessage().toLowerCase(Locale.ROOT), Matchers.containsString("application [a1]")); + assertThat(iae2.getMessage().toLowerCase(Locale.ROOT), Matchers.containsString("resource [/data/0]")); + + final IllegalArgumentException iae3 = expectThrows(IllegalArgumentException.class, + () -> response.hasApplicationPrivilege("a1", "/action/1", "execute")); + assertThat(iae3.getMessage().toLowerCase(Locale.ROOT), Matchers.containsString("application [a1]")); + assertThat(iae3.getMessage().toLowerCase(Locale.ROOT), Matchers.containsString("resource [/action/1]")); + + final IllegalArgumentException iae4 = expectThrows(IllegalArgumentException.class, + () -> response.hasApplicationPrivilege("a1", "/data/1", "write")); + 
assertThat(iae4.getMessage().toLowerCase(Locale.ROOT), Matchers.containsString("application [a1]")); + assertThat(iae4.getMessage().toLowerCase(Locale.ROOT), Matchers.containsString("resource [/data/1]")); + assertThat(iae4.getMessage().toLowerCase(Locale.ROOT), Matchers.containsString("privilege [write]")); + } + + public void testEqualsAndHashCode() { + final HasPrivilegesResponse response = randomResponse(); + EqualsHashCodeTestUtils.checkEqualsAndHashCode(response, this::copy, this::mutate); + } + + private HasPrivilegesResponse copy(HasPrivilegesResponse response) { + return new HasPrivilegesResponse(response.getUsername(), + response.hasAllRequested(), + response.getClusterPrivileges(), + response.getIndexPrivileges(), + response.getApplicationPrivileges()); + } + + private HasPrivilegesResponse mutate(HasPrivilegesResponse request) { + switch (randomIntBetween(1, 5)) { + case 1: + return new HasPrivilegesResponse("_" + request.getUsername(), request.hasAllRequested(), + request.getClusterPrivileges(), request.getIndexPrivileges(), request.getApplicationPrivileges()); + case 2: + return new HasPrivilegesResponse(request.getUsername(), request.hasAllRequested() == false, + request.getClusterPrivileges(), request.getIndexPrivileges(), request.getApplicationPrivileges()); + case 3: + return new HasPrivilegesResponse(request.getUsername(), request.hasAllRequested(), + emptyMap(), request.getIndexPrivileges(), request.getApplicationPrivileges()); + case 4: + return new HasPrivilegesResponse(request.getUsername(), request.hasAllRequested(), + request.getClusterPrivileges(), emptyMap(), request.getApplicationPrivileges()); + case 5: + return new HasPrivilegesResponse(request.getUsername(), request.hasAllRequested(), + request.getClusterPrivileges(), request.getIndexPrivileges(), emptyMap()); + } + throw new IllegalStateException("The universe is broken (or the RNG is)"); + } + + private HasPrivilegesResponse randomResponse() { + final Map cluster = randomPrivilegeMap(); + final Map> index = randomResourceMap(); + + final Map>> application = new HashMap<>(); + for (String app : randomArray(1, 3, String[]::new, () -> randomAlphaOfLengthBetween(3, 6).toLowerCase(Locale.ROOT))) { + application.put(app, randomResourceMap()); + } + return new HasPrivilegesResponse(randomAlphaOfLengthBetween(3, 8), randomBoolean(), cluster, index, application); + } + + private Map> randomResourceMap() { + final Map> resource = new HashMap<>(); + for (String res : randomArray(1, 3, String[]::new, () -> randomAlphaOfLengthBetween(5, 8))) { + resource.put(res, randomPrivilegeMap()); + } + return resource; + } + + private Map randomPrivilegeMap() { + final Map map = new HashMap<>(); + for (String privilege : randomArray(1, 6, String[]::new, () -> randomAlphaOfLengthBetween(3, 12))) { + map.put(privilege, randomBoolean()); + } + return map; + } + +} diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java index ceff354f15fe8..daedbd92180ff 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java @@ -112,6 +112,7 @@ import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; +import static 
org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.not; @@ -726,7 +727,7 @@ public void testZipRelativeOutsideEntryName() throws Exception { assertInstallCleaned(env.v2()); } - public void testOfficialPluginsHelpSorted() throws Exception { + public void testOfficialPluginsHelpSortedAndMissingObviouslyWrongPlugins() throws Exception { MockTerminal terminal = new MockTerminal(); new InstallPluginCommand() { @Override @@ -749,6 +750,9 @@ protected boolean addShutdownHook() { assertTrue(prev + " < " + line, prev.compareTo(line) < 0); prev = line; line = reader.readLine(); + // qa is not really a plugin and it shouldn't sneak in + assertThat(line, not(endsWith("qa"))); + assertThat(line, not(endsWith("example"))); } } } diff --git a/docs/java-api/docs/bulk.asciidoc b/docs/java-api/docs/bulk.asciidoc index 03c6ae719e5bd..1c2882d9c07e7 100644 --- a/docs/java-api/docs/bulk.asciidoc +++ b/docs/java-api/docs/bulk.asciidoc @@ -165,3 +165,26 @@ client.admin().indices().prepareRefresh().get(); client.prepareSearch().get(); -------------------------------------------------- + +[[java-docs-bulk-global-parameters]] +==== Global Parameters + +Global parameters can be specified on the BulkRequest as well as BulkProcessor, similar to the REST API. These global + parameters serve as defaults and can be overridden by local parameters specified on each sub request. Some parameters + have to be set before any sub request is added - index, type - and you have to specify them during BulkRequest or + BulkProcessor creation. Some are optional - pipeline, routing - and can be specified at any point before the bulk is sent. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{hlrc-tests}/BulkProcessorIT.java[bulk-processor-mix-parameters] +-------------------------------------------------- +<1> global parameters from the BulkRequest will be applied on a sub request +<2> local pipeline parameter on a sub request will override global parameters from BulkRequest + + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{hlrc-tests}/BulkRequestWithGlobalParametersIT.java[bulk-request-mix-pipeline] +-------------------------------------------------- +<1> local pipeline parameter on a sub request will override global pipeline from the BulkRequest +<2> global parameter from the BulkRequest will be applied on a sub request diff --git a/docs/java-api/index.asciidoc b/docs/java-api/index.asciidoc index fbbba6da884f4..73f90fa938b2e 100644 --- a/docs/java-api/index.asciidoc +++ b/docs/java-api/index.asciidoc @@ -151,6 +151,7 @@ and add it as a dependency. 
As an example, we will use the `slf4j-simple` logger
--------------------------------------------------
:client-tests: {docdir}/../../server/src/test/java/org/elasticsearch/client/documentation
+:hlrc-tests: {docdir}/../../client/rest-high-level/src/test/java/org/elasticsearch/client
:client-reindex-tests: {docdir}/../../modules/reindex/src/test/java/org/elasticsearch/client/documentation
diff --git a/docs/java-rest/high-level/ccr/resume_follow.asciidoc b/docs/java-rest/high-level/ccr/resume_follow.asciidoc
new file mode 100644
index 0000000000000..349440dbc9450
--- /dev/null
+++ b/docs/java-rest/high-level/ccr/resume_follow.asciidoc
@@ -0,0 +1,35 @@
+--
+:api: ccr-resume-follow
+:request: ResumeFollowRequest
+:response: ResumeFollowResponse
+--
+
+[id="{upid}-{api}"]
+=== Resume Follow API
+
+
+[id="{upid}-{api}-request"]
+==== Request
+
+The Resume Follow API allows you to resume following a follower index that has been paused.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests-file}[{api}-request]
+--------------------------------------------------
+<1> The name of the follower index.
+
+[id="{upid}-{api}-response"]
+==== Response
+
+The returned +{response}+ indicates if the resume follow request was received.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests-file}[{api}-response]
+--------------------------------------------------
+<1> Whether or not the resume follow was acknowledged.
+
+include::../execution.asciidoc[]
+
+
diff --git a/docs/java-rest/high-level/ccr/unfollow.asciidoc b/docs/java-rest/high-level/ccr/unfollow.asciidoc
new file mode 100644
index 0000000000000..bb6dd654ed4d1
--- /dev/null
+++ b/docs/java-rest/high-level/ccr/unfollow.asciidoc
@@ -0,0 +1,36 @@
+--
+:api: ccr-unfollow
+:request: UnfollowRequest
+:response: UnfollowResponse
+--
+
+[id="{upid}-{api}"]
+=== Unfollow API
+
+
+[id="{upid}-{api}-request"]
+==== Request
+
+The Unfollow API allows you to unfollow a follower index and make it a regular index.
+Note that index following needs to be paused and the follower index needs to be closed.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests-file}[{api}-request]
+--------------------------------------------------
+<1> The name of the follower index to unfollow.
+
+[id="{upid}-{api}-response"]
+==== Response
+
+The returned +{response}+ indicates if the unfollow request was received.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests-file}[{api}-response]
+--------------------------------------------------
+<1> Whether or not the unfollow was acknowledged.
+
+include::../execution.asciidoc[]
+
+
diff --git a/docs/java-rest/high-level/document/bulk.asciidoc b/docs/java-rest/high-level/document/bulk.asciidoc
index db9a3463135e8..d68a8ce6c7f81 100644
--- a/docs/java-rest/high-level/document/bulk.asciidoc
+++ b/docs/java-rest/high-level/document/bulk.asciidoc
@@ -37,9 +37,9 @@ And different operation types can be added to the same +{request}+:
--------------------------------------------------
include-tagged::{doc-tests-file}[{api}-request-with-mixed-operations]
--------------------------------------------------
-<1> Adds a `DeleteRequest` to the `BulkRequest`. See <<{upid}-delete>>
+<1> Adds a `DeleteRequest` to the +{request}+.
See <<{upid}-delete>> for more information on how to build `DeleteRequest`. -<2> Adds an `UpdateRequest` to the `BulkRequest`. See <<{upid}-update>> +<2> Adds an `UpdateRequest` to the +{request}+. See <<{upid}-update>> for more information on how to build `UpdateRequest`. <3> Adds an `IndexRequest` using the SMILE format @@ -70,6 +70,25 @@ the index/update/delete operations. `ActiveShardCount.ALL`, `ActiveShardCount.ONE` or `ActiveShardCount.DEFAULT` (default) +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[bulk-request-pipeline] +-------------------------------------------------- +<1> Global pipelineId used on all sub requests, unless overridden on a sub request + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[bulk-request-routing] +-------------------------------------------------- +<1> Global routingId used on all sub requests, unless overridden on a sub request + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/CRUDDocumentationIT.java[bulk-request-index-type] +-------------------------------------------------- +<1> A bulk request with global index and type used on all sub requests, unless overridden on a sub request. +Both parameters are @Nullable and can only be set during BulkRequest creation. + include::../execution.asciidoc[] [id="{upid}-{api}-response"] @@ -148,7 +167,7 @@ actions currently added (defaults to 1000, use -1 to disable it) actions currently added (defaults to 5Mb, use -1 to disable it) <3> Set the number of concurrent requests allowed to be executed (default to 1, use 0 to only allow the execution of a single request) -<4> Set a flush interval flushing any `BulkRequest` pending if the +<4> Set a flush interval flushing any +{request}+ pending if the interval passes (defaults to not set) <5> Set a constant back off policy that initially waits for 1 second and retries up to 3 times. See `BackoffPolicy.noBackoff()`, diff --git a/docs/java-rest/high-level/document/multi-term-vectors.asciidoc b/docs/java-rest/high-level/document/multi-term-vectors.asciidoc new file mode 100644 index 0000000000000..d2c4666130b2b --- /dev/null +++ b/docs/java-rest/high-level/document/multi-term-vectors.asciidoc @@ -0,0 +1,59 @@ +-- +:api: multi-term-vectors +:request: MultiTermVectorsRequest +:response: MultiTermVectorsResponse +:tvrequest: TermVectorsRequest +-- + +[id="{upid}-{api}"] +=== Multi Term Vectors API + +Multi Term Vectors API allows to get multiple term vectors at once. + +[id="{upid}-{api}-request"] +==== Multi Term Vectors Request +There are two ways to create a +{request}+. + +The first way is to create an empty +{request}+, and then add individual +<> to it. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- +<1> Create an empty +{request}+. +<2> Add the first +{tvrequest}+ to the +{request}+. +<3> Add the second +{tvrequest}+ for an artificial doc to the +{request}+. + + +The second way can be used when all term vectors requests share the same +arguments, such as index, type, and other settings. 
In this case, a template ++{tvrequest}+ can be created with all necessary settings set, and +this template request can be passed to +{request}+ along with all +documents' ids for which to execute these requests. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request-template] +-------------------------------------------------- +<1> Create a template +{tvrequest}+. +<2> Pass documents' ids and the template to the +{request}+. + + +include::../execution.asciidoc[] + + +[id="{upid}-{api}-response"] +==== Multi Term Vectors Response + ++{response}+ allows to get the list of term vectors responses, +each of which can be inspected as described in +<>. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> Get a list of `TermVectorsResponse` + + diff --git a/docs/java-rest/high-level/document/reindex.asciidoc b/docs/java-rest/high-level/document/reindex.asciidoc index 2482467410c96..7d8876aa1269a 100644 --- a/docs/java-rest/high-level/document/reindex.asciidoc +++ b/docs/java-rest/high-level/document/reindex.asciidoc @@ -10,7 +10,7 @@ [id="{upid}-{api}-request"] ==== Reindex Request -A +{request} can be used to copy documents from one or more indexes into a +A +{request}+ can be used to copy documents from one or more indexes into a destination index. It requires an existing source index and a target index which may or may not exist pre-request. Reindex does not attempt @@ -100,7 +100,7 @@ include-tagged::{doc-tests-file}[{api}-request-sort] <1> add descending sort to`field1` <2> add ascending sort to `field2` -+{request} also supports a `script` that modifies the document. It allows you to ++{request}+ also supports a `script` that modifies the document. It allows you to also change the document's metadata. The following example illustrates that. ["source","java",subs="attributes,callouts,macros"] @@ -157,6 +157,19 @@ include-tagged::{doc-tests-file}[{api}-request-refresh] include::../execution.asciidoc[] +[id="{upid}-{api}-task-submission"] +==== Reindex task submission +It is also possible to submit a +{request}+ and not wait for it completion with the use of Task API. This is an equivalent of a REST request +with wait_for_completion flag set to false. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{hlrc-tests}/ReindexIT.java[submit-reindex-task] +-------------------------------------------------- +<1> A +{request}+ is constructed the same way as for the synchronous method +<2> A submit method returns a `TaskSubmissionResponse` which contains a task identifier. +<3> The task identifier can be used to get `response` from a completed task. 
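A minimal sketch of this task-submission flow (not the `ReindexIT.java` snippet referenced above; the method name `submitReindexTask`, the index names, and the variable names are assumptions, and `client` is assumed to be an existing `RestHighLevelClient`) might look like:

["source","java"]
--------------------------------------------------
// Sketch only: submit a reindex operation as a task instead of waiting for it to complete.
ReindexRequest reindexRequest = new ReindexRequest();          // built the same way as for the synchronous method
reindexRequest.setSourceIndices("source-index");
reindexRequest.setDestIndex("dest-index");

TaskSubmissionResponse submission =
    client.submitReindexTask(reindexRequest, RequestOptions.DEFAULT); // returns once the task is submitted
String taskId = submission.getTask();                          // task identifier usable with the Task APIs
--------------------------------------------------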
+ [id="{upid}-{api}-response"] ==== Reindex Response diff --git a/docs/java-rest/high-level/ilm/delete_lifecycle_policy.asciidoc b/docs/java-rest/high-level/ilm/delete_lifecycle_policy.asciidoc new file mode 100644 index 0000000000000..e6f100294aec4 --- /dev/null +++ b/docs/java-rest/high-level/ilm/delete_lifecycle_policy.asciidoc @@ -0,0 +1,36 @@ +-- +:api: ilm-delete-lifecycle-policy +:request: DeleteLifecyclePolicyRequest +:response: AcknowledgedResponse +-- + +[id="{upid}-{api}"] +=== Delete Lifecycle Policy API + + +[id="{upid}-{api}-request"] +==== Request + +The Delete Lifecycle Policy API allows you to delete an Index Lifecycle +Management Policy from the cluster. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- +<1> The policy named `my_policy` will be deleted. + +[id="{upid}-{api}-response"] +==== Response + +The returned +{response}+ indicates if the delete lifecycle policy request was received. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> Whether or not the delete lifecycle policy request was acknowledged. + +include::../execution.asciidoc[] + + diff --git a/docs/java-rest/high-level/ilm/lifecycle_management_status.asciidoc b/docs/java-rest/high-level/ilm/lifecycle_management_status.asciidoc new file mode 100644 index 0000000000000..713c5480cae04 --- /dev/null +++ b/docs/java-rest/high-level/ilm/lifecycle_management_status.asciidoc @@ -0,0 +1,36 @@ +-- +:api: ilm-status +:request: LifecycleManagementStatusRequest +:response: AcknowledgedResponse +-- + +[id="{upid}-{api}"] +=== Index Lifecycle Management Status API + + +[id="{upid}-{api}-request"] +==== Request + +The Index Lifecycle Management Status API allows you to retrieve the status +of Index Lifecycle Management + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- + + +[id="{upid}-{api}-response"] +==== Response + +The returned +{response}+ indicates the status of Index Lifecycle Management. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> The returned status can be `RUNNING`, `STOPPING`, or `STOPPED`. + +include::../execution.asciidoc[] + + diff --git a/docs/java-rest/high-level/ilm/put_lifecycle_policy.asciidoc b/docs/java-rest/high-level/ilm/put_lifecycle_policy.asciidoc index 23671e23f7569..75103fa5bdfd9 100644 --- a/docs/java-rest/high-level/ilm/put_lifecycle_policy.asciidoc +++ b/docs/java-rest/high-level/ilm/put_lifecycle_policy.asciidoc @@ -31,7 +31,7 @@ The returned +{response}+ indicates if the put lifecycle policy request was rece -------------------------------------------------- include-tagged::{doc-tests-file}[{api}-response] -------------------------------------------------- -<1> Whether or not the put lifecycle policy was acknowledge. +<1> Whether or not the put lifecycle policy was acknowledged. 
include::../execution.asciidoc[] diff --git a/docs/java-rest/high-level/ilm/retry_lifecycle_policy.asciidoc b/docs/java-rest/high-level/ilm/retry_lifecycle_policy.asciidoc new file mode 100644 index 0000000000000..89dd4ea1cfa6b --- /dev/null +++ b/docs/java-rest/high-level/ilm/retry_lifecycle_policy.asciidoc @@ -0,0 +1,36 @@ +-- +:api: ilm-retry-lifecycle-policy +:request: RetryLifecyclePolicyRequest +:response: AcknowledgedResponse +-- + +[id="{upid}-{api}"] +=== Retry Lifecycle Policy API + + +[id="{upid}-{api}-request"] +==== Request + +The Retry Lifecycle Policy API allows you to invoke execution of policies +that encountered errors in certain indices. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- +<1> Retries execution of `my_index`'s policy + +[id="{upid}-{api}-response"] +==== Response + +The returned +{response}+ indicates if the retry lifecycle policy request was received. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> Whether or not the lifecycle policy retry was acknowledged. + +include::../execution.asciidoc[] + + diff --git a/docs/java-rest/high-level/ilm/start_lifecycle_management.asciidoc b/docs/java-rest/high-level/ilm/start_lifecycle_management.asciidoc new file mode 100644 index 0000000000000..d65e7dd5009fb --- /dev/null +++ b/docs/java-rest/high-level/ilm/start_lifecycle_management.asciidoc @@ -0,0 +1,36 @@ +-- +:api: ilm-start-ilm +:request: StartILMRequest +:response: AcknowledgedResponse +-- + +[id="{upid}-{api}"] +=== Start Index Lifecycle Management API + + +[id="{upid}-{api}-request"] +==== Request + +The Start Lifecycle Management API allows you to start Index Lifecycle +Management if it has previously been stopped. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- + + +[id="{upid}-{api}-response"] +==== Response + +The returned +{response}+ indicates if the request to start Index Lifecycle +Management was received. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> Whether or not the request to start Index Lifecycle Management was +acknowledged. + +include::../execution.asciidoc[] diff --git a/docs/java-rest/high-level/ilm/stop_lifecycle_management.asciidoc b/docs/java-rest/high-level/ilm/stop_lifecycle_management.asciidoc new file mode 100644 index 0000000000000..85117fe311a3a --- /dev/null +++ b/docs/java-rest/high-level/ilm/stop_lifecycle_management.asciidoc @@ -0,0 +1,38 @@ +-- +:api: ilm-stop-ilm +:request: StopILMRequest +:response: AcknowledgedResponse +-- + +[id="{upid}-{api}"] +=== Stop Index Lifecycle Management API + + +[id="{upid}-{api}-request"] +==== Request + +The Stop Lifecycle Management API allows you to stop Index Lifecycle +Management temporarily. 
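Before the tagged snippets below, here is a rough sketch of a call (assuming `client` is an existing `RestHighLevelClient`; the `indexLifecycle()` accessor, the `stopILM` method name, and the no-argument +{request}+ constructor are assumptions):

["source","java"]
--------------------------------------------------
// Sketch only: stop Index Lifecycle Management and inspect the acknowledgement.
StopILMRequest stopRequest = new StopILMRequest();
AcknowledgedResponse stopResponse =
    client.indexLifecycle().stopILM(stopRequest, RequestOptions.DEFAULT);
boolean acknowledged = stopResponse.isAcknowledged();   // true if the cluster accepted the request
--------------------------------------------------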
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- + + +[id="{upid}-{api}-response"] +==== Response + +The returned +{response}+ indicates if the request to stop Index Lifecycle +Management was received. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> Whether or not the request to stop Index Lifecycle Management was +acknowledged. + +include::../execution.asciidoc[] + + diff --git a/docs/java-rest/high-level/index.asciidoc b/docs/java-rest/high-level/index.asciidoc index a15967e9ad717..2010c9c539a54 100644 --- a/docs/java-rest/high-level/index.asciidoc +++ b/docs/java-rest/high-level/index.asciidoc @@ -25,6 +25,7 @@ the same response objects. -- :doc-tests: {docdir}/../../client/rest-high-level/src/test/java/org/elasticsearch/client/documentation +:hlrc-tests: {docdir}/../../client/rest-high-level/src/test/java/org/elasticsearch/client include::getting-started.asciidoc[] include::supported-apis.asciidoc[] diff --git a/docs/java-rest/high-level/migration/upgrade.asciidoc b/docs/java-rest/high-level/migration/upgrade.asciidoc index 76eae0652d9bf..7497b74d38391 100644 --- a/docs/java-rest/high-level/migration/upgrade.asciidoc +++ b/docs/java-rest/high-level/migration/upgrade.asciidoc @@ -1,14 +1,22 @@ +-- +:api: upgrade +:request: IndexUpgradeRequest +:response: BulkByScrollResponse +:submit_response: IndexUpgradeSubmissionResponse +:doc-tests-file: {doc-tests}/MigrationClientDocumentationIT.java +-- + [[java-rest-high-migration-upgrade]] === Migration Upgrade [[java-rest-high-migraton-upgrade-request]] ==== Index Upgrade Request -An `IndexUpgradeRequest` requires an index argument. Only one index at the time should be upgraded: +An +{request}+ requires an index argument. 
Only one index at a time should be upgraded:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/MigrationClientDocumentationIT.java[upgrade-request]
+include-tagged::{doc-tests-file}[{api}-request]
--------------------------------------------------
<1> Create a new request instance
@@ -17,39 +25,37 @@ include-tagged::{doc-tests}/MigrationClientDocumentationIT.java[upgrade-request]
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/MigrationClientDocumentationIT.java[upgrade-execute]
+include-tagged::{doc-tests-file}[{api}-execute]
--------------------------------------------------
[[java-rest-high-migration-upgrade-response]]
==== Response
-The returned `BulkByScrollResponse` contains information about the executed operation
+The returned +{response}+ contains information about the executed operation
[[java-rest-high-migraton-async-upgrade-request]]
==== Asynchronous Execution
-The asynchronous execution of a upgrade request requires both the `IndexUpgradeRequest`
+The asynchronous execution of an upgrade request requires both the +{request}+
instance and an `ActionListener` instance to be passed to the asynchronous method:
-A typical listener for `BulkResponse` looks like:
-
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/MigrationClientDocumentationIT.java[upgrade-async-listener]
+include-tagged::{doc-tests-file}[{api}-async-listener]
--------------------------------------------------
<1> Called when the execution is successfully completed. The response is
provided as an argument and contains a list of individual results for each
operation that was executed. Note that one or more operations might have
failed while the others have been successfully executed.
-<2> Called when the whole `IndexUpgradeRequest` fails. In this case the raised
+<2> Called when the whole +{request}+ fails. In this case the raised
exception is provided as an argument and no operation has been executed.
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
-include-tagged::{doc-tests}/MigrationClientDocumentationIT.java[upgrade-async-execute]
+include-tagged::{doc-tests-file}[{api}-async-execute]
--------------------------------------------------
-<1> The `IndexUpgradeRequest` to execute and the `ActionListener` to use when
+<1> The +{request}+ to execute and the `ActionListener` to use when
the execution completes
The asynchronous method does not block and returns immediately. Once it is
@@ -59,11 +65,11 @@ it failed.
=== Migration Upgrade with Task API
-Submission of upgrade request task will requires the `IndexUpgradeRequest` and will return
-`IndexUpgradeSubmissionResponse`. The `IndexUpgradeSubmissionResponse` can later be use to fetch
+Submission of an upgrade request task requires the +{request}+ and will return
++{submit_response}+. The +{submit_response}+ can later be used to fetch the
TaskId and query the Task API for results.
["source","java",subs="attributes,callouts,macros"] -------------------------------------------------- -include-tagged::{doc-tests}/MigrationClientDocumentationIT.java[upgrade-task-api] +include-tagged::{doc-tests-file}[{api}-task-api] -------------------------------------------------- diff --git a/docs/java-rest/high-level/ml/delete-calendar-job.asciidoc b/docs/java-rest/high-level/ml/delete-calendar-job.asciidoc new file mode 100644 index 0000000000000..d7686315f0f2f --- /dev/null +++ b/docs/java-rest/high-level/ml/delete-calendar-job.asciidoc @@ -0,0 +1,36 @@ +-- +:api: delete-calendar-job +:request: DeleteCalendarJobRequest +:response: PutCalendarResponse +-- +[id="{upid}-{api}"] +=== Delete Calendar Job API +Removes {ml} jobs from an existing {ml} calendar. +The API accepts a +{request}+ and responds +with a +{response}+ object. + +[id="{upid}-{api}-request"] +==== Delete Calendar Job Request + +A +{request}+ is constructed referencing a non-null +calendar ID, and JobIDs which to remove from the calendar + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- +<1> The ID of the calendar from which to remove the jobs +<2> The JobIds to remove from the calendar + +[id="{upid}-{api}-response"] +==== Delete Calendar Response + +The returned +{response}+ contains the updated Calendar: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> The updated Calendar with the jobs removed + +include::../execution.asciidoc[] diff --git a/docs/java-rest/high-level/ml/get-calendar-events.asciidoc b/docs/java-rest/high-level/ml/get-calendar-events.asciidoc new file mode 100644 index 0000000000000..486921fdcc9b2 --- /dev/null +++ b/docs/java-rest/high-level/ml/get-calendar-events.asciidoc @@ -0,0 +1,65 @@ +-- +:api: get-calendar-events +:request: GetCalendarEventsRequest +:response: GetCalendarEventsResponse +-- +[id="{upid}-{api}"] +=== Get Calendar Events API +Retrieves a calendars events. +It accepts a +{request}+ and responds +with a +{response}+ object. + +[id="{upid}-{api}-request"] +==== Get Calendars Request + +A +{request}+ requires a non-null calendar ID. +Using the literal `_all` returns the events for all calendars. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- +<1> Constructing a new request for the specified calendarId + +==== Optional Arguments +The following arguments are optional: + + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-page] +-------------------------------------------------- +<1> The page parameters `from` and `size`. `from` specifies the number of events to skip. +`size` specifies the maximum number of events to get. Defaults to `0` and `100` respectively. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-start] +-------------------------------------------------- +<1> Specifies to get events with timestamps after this time. 
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-end] +-------------------------------------------------- +<1> Specifies to get events with timestamps earlier than this time. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-jobid] +-------------------------------------------------- +<1> Get events for the job. When this option is used calendar_id must be `_all` + +include::../execution.asciidoc[] + +[id="{upid}-{api}-response"] +==== Get calendars Response + +The returned +{response}+ contains the requested events: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> The count of events that were matched +<2> The events retrieved \ No newline at end of file diff --git a/docs/java-rest/high-level/ml/post-calendar-event.asciidoc b/docs/java-rest/high-level/ml/post-calendar-event.asciidoc new file mode 100644 index 0000000000000..ba7c69acf03d9 --- /dev/null +++ b/docs/java-rest/high-level/ml/post-calendar-event.asciidoc @@ -0,0 +1,38 @@ +-- +:api: post-calendar-event +:request: PostCalendarEventRequest +:response: PostCalendarEventResponse +-- +[id="{upid}-{api}"] +=== Post Calendar Event API +Adds new ScheduledEvents to an existing {ml} calendar. + +The API accepts a +{request}+ and responds +with a +{response}+ object. + +[id="{upid}-{api}-request"] +==== Post Calendar Event Request + +A +{request}+ is constructed with a calendar ID object +and a non-empty list of scheduled events. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- +<1> Non-null existing calendar ID +<2> Non-null, non-empty collection of `ScheduledEvent` objects + + +[id="{upid}-{api}-response"] +==== Post Calendar Event Response + +The returned +{response}+ contains the added `ScheduledEvent` objects: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> The `ScheduledEvent` objects that were added to the calendar + +include::../execution.asciidoc[] diff --git a/docs/java-rest/high-level/ml/put-calendar-job.asciidoc b/docs/java-rest/high-level/ml/put-calendar-job.asciidoc new file mode 100644 index 0000000000000..17fc3a93063ca --- /dev/null +++ b/docs/java-rest/high-level/ml/put-calendar-job.asciidoc @@ -0,0 +1,36 @@ +-- +:api: put-calendar-job +:request: PutCalendarJobRequest +:response: PutCalendarResponse +-- +[id="{upid}-{api}"] +=== Put Calendar Job API +Adds {ml} jobs to an existing {ml} calendar. +The API accepts a +{request}+ and responds +with a +{response}+ object. 
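Before the tagged snippets below, here is a minimal sketch of the overall call (the calendar and job IDs are made up, `client` is assumed to be an existing `RestHighLevelClient`, and the exact constructor and accessor names are assumptions):

["source","java"]
--------------------------------------------------
// Sketch only: add two existing jobs to an existing calendar.
PutCalendarJobRequest request =
    new PutCalendarJobRequest("holiday-calendar", "job-1", "job-2"); // calendar ID first, then job IDs
PutCalendarResponse response =
    client.machineLearning().putCalendarJob(request, RequestOptions.DEFAULT);
Calendar updatedCalendar = response.getCalendar();                   // calendar now listing the added jobs
--------------------------------------------------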
+ +[id="{upid}-{api}-request"] +==== Put Calendar Job Request + +A +{request}+ is constructed referencing a non-null +calendar ID, and JobIDs to which to add to the calendar + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- +<1> The ID of the calendar to which to add the jobs +<2> The JobIds to add to the calendar + +[id="{upid}-{api}-response"] +==== Put Calendar Response + +The returned +{response}+ contains the updated Calendar: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> The updated Calendar + +include::../execution.asciidoc[] diff --git a/docs/java-rest/high-level/ml/put-datafeed.asciidoc b/docs/java-rest/high-level/ml/put-datafeed.asciidoc index ed8a089c7bed8..8b2b4dd27f1bf 100644 --- a/docs/java-rest/high-level/ml/put-datafeed.asciidoc +++ b/docs/java-rest/high-level/ml/put-datafeed.asciidoc @@ -72,6 +72,7 @@ The window must be larger than the Job's bucket size, but smaller than 24 hours, and span less than 10,000 buckets. Defaults to `null`, which causes an appropriate window span to be calculated when the datafeed runs. +The default `check_window` span calculation is the max between `2h` or `8 * bucket_span`. To explicitly disable, pass `DelayedDataCheckConfig.disabledDelayedDataCheckConfig()`. ["source","java",subs="attributes,callouts,macros"] diff --git a/docs/java-rest/high-level/ml/revert-model-snapshot.asciidoc b/docs/java-rest/high-level/ml/revert-model-snapshot.asciidoc new file mode 100644 index 0000000000000..7c45ce8ebf0a0 --- /dev/null +++ b/docs/java-rest/high-level/ml/revert-model-snapshot.asciidoc @@ -0,0 +1,46 @@ +-- +:api: revert-model-snapshot +:request: RevertModelSnapshotRequest +:response: RevertModelSnapshotResponse +-- +[id="{upid}-{api}"] +=== Revert Model Snapshot API + +The Revert Model Snapshot API provides the ability to revert to a previous {ml} model snapshot. +It accepts a +{request}+ object and responds +with a +{response}+ object. + +[id="{upid}-{api}-request"] +==== Revert Model Snapshot Request + +A +{request}+ requires the following arguments: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- +<1> Constructing a new request referencing existing `jobId` and `snapshotId` values. + +==== Optional Arguments + +The following arguments are optional: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-delete-intervening-results] +-------------------------------------------------- +<1> A flag indicating whether or not results in the period between the timestamp on the reverted snapshot and the latest results should be deleted + + +include::../execution.asciidoc[] + +[id="{upid}-{api}-response"] +==== Revert Job Response + +A +{response}+ contains the full representation of the reverted `ModelSnapshot`. 
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests-file}[{api}-response]
+--------------------------------------------------
+<1> The reverted `ModelSnapshot`
diff --git a/docs/java-rest/high-level/ml/update-model-snapshot.asciidoc b/docs/java-rest/high-level/ml/update-model-snapshot.asciidoc
new file mode 100644
index 0000000000000..b38539b062224
--- /dev/null
+++ b/docs/java-rest/high-level/ml/update-model-snapshot.asciidoc
@@ -0,0 +1,53 @@
+--
+:api: update-model-snapshot
+:request: UpdateModelSnapshotRequest
+:response: UpdateModelSnapshotResponse
+--
+[id="{upid}-{api}"]
+=== Update Model Snapshot API
+
+The Update Model Snapshot API provides the ability to update a {ml} model snapshot.
+It accepts a +{request}+ object and responds
+with a +{response}+ object.
+
+[id="{upid}-{api}-request"]
+==== Update Model Snapshot Request
+
+A +{request}+ requires the following arguments:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests-file}[{api}-request]
+--------------------------------------------------
+<1> Constructing a new request referencing existing `jobId` and `snapshotId` values.
+
+==== Optional Arguments
+
+The following arguments are optional:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests-file}[{api}-description]
+--------------------------------------------------
+<1> The updated description of the {ml} model snapshot
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests-file}[{api}-retain]
+--------------------------------------------------
+<1> The updated `retain` property of the {ml} model snapshot
+
+
+include::../execution.asciidoc[]
+
+[id="{upid}-{api}-response"]
+==== Update Model Snapshot Response
+
+A +{response}+ contains an acknowledgement of the update request and the full representation of the updated `ModelSnapshot` object.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests-file}[{api}-response]
+--------------------------------------------------
+<1> An acknowledgement of the request
+<2> The updated `ModelSnapshot`
diff --git a/docs/java-rest/high-level/security/has-privileges.asciidoc b/docs/java-rest/high-level/security/has-privileges.asciidoc
new file mode 100644
index 0000000000000..181b1b7f48167
--- /dev/null
+++ b/docs/java-rest/high-level/security/has-privileges.asciidoc
@@ -0,0 +1,86 @@
+--
+:api: has-privileges
+:request: HasPrivilegesRequest
+:response: HasPrivilegesResponse
+--
+
+[id="{upid}-{api}"]
+=== Has Privileges API
+
+[id="{upid}-{api}-request"]
+==== Has Privileges Request
+The +{request}+ supports checking for any or all of the following privilege types:
+
+* Cluster Privileges
+* Index Privileges
+* Application Privileges
+
+Privilege types that you do not wish to check may be passed in as +null+, but at least
+one privilege must be specified.
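As a hedged sketch of how such a request might be assembled (the constructor argument order, the `IndicesPrivileges` builder calls, and the privilege names are assumptions; `null` is passed for the application privileges that are not being checked):

["source","java"]
--------------------------------------------------
// Sketch only: check one cluster privilege and two index privileges, skipping application privileges.
Set<String> clusterPrivileges = Collections.singleton("monitor");
Set<IndicesPrivileges> indexPrivileges = Collections.singleton(
    IndicesPrivileges.builder()
        .indices("logstash-2018-10-05")
        .privileges("read", "write")
        .build());
HasPrivilegesRequest request =
    new HasPrivilegesRequest(clusterPrivileges, indexPrivileges, null); // application privileges omitted
--------------------------------------------------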
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- +include::../execution.asciidoc[] + +[id="{upid}-{api}-response"] +==== Has Privileges Response + +The returned +{response}+ contains the following properties + +`username`:: +The username (userid) of the current user (for whom the "has privileges" +check was executed) + +`hasAllRequested`:: +`true` if the user has all of the privileges that were specified in the ++{request}+. Otherwise `false`. + +`clusterPrivileges`:: +A `Map` where each key is the name of one of the cluster +privileges specified in the request, and the value is `true` if the user +has that privilege, and `false` otherwise. ++ +The method `hasClusterPrivilege` can be used to retrieve this information +in a more fluent manner. This method throws an `IllegalArgumentException` +if the privilege was not included in the response (which will be the case +if the privilege was not part of the request). + +`indexPrivileges`:: +A `Map>` where each key is the name of an +index (as specified in the +{request}+) and the value is a `Map` from +privilege name to a `Boolean`. The `Boolean` value is `true` if the user +has that privilege on that index, and `false` otherwise. ++ +The method `hasIndexPrivilege` can be used to retrieve this information +in a more fluent manner. This method throws an `IllegalArgumentException` +if the privilege was not included in the response (which will be the case +if the privilege was not part of the request). + +`applicationPrivileges`:: +A `Map>>>` where each key is the +name of an application (as specified in the +{request}+). +For each application, the value is a `Map` keyed by resource name, with +each value being another `Map` from privilege name to a `Boolean`. +The `Boolean` value is `true` if the user has that privilege on that +resource for that application, and `false` otherwise. ++ +The method `hasApplicationPrivilege` can be used to retrieve this +information in a more fluent manner. This method throws an +`IllegalArgumentException` if the privilege was not included in the +response (which will be the case if the privilege was not part of the +request). + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- +<1> `hasMonitor` will be `true` if the user has the `"monitor"` + cluster privilege. +<2> `hasWrite` will be `true` if the user has the `"write"` + privilege on the `"logstash-2018-10-05"` index. +<3> `hasRead` will be `true` if the user has the `"read"` + privilege on all possible indices that would match + the `"logstash-2018-*"` pattern. 
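The helper methods mentioned above can be used instead of walking the nested maps directly. A minimal sketch (assuming `response` is a +{response}+ returned by executing the +{request}+; the index, application, and resource names are illustrative only):

["source","java"]
--------------------------------------------------
// Sketch only: fluent checks on a HasPrivilegesResponse.
boolean canMonitor = response.hasClusterPrivilege("monitor");                        // cluster privilege
boolean canWrite   = response.hasIndexPrivilege("logstash-2018-10-05", "write");     // index privilege
boolean canRead    = response.hasApplicationPrivilege("my-app", "/data/1", "read");  // application privilege
--------------------------------------------------

Each of these methods throws an `IllegalArgumentException` if the named privilege (or index, application, or resource) was not part of the original +{request}+.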
+ diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 9dda33bf6da45..6a3040e9434d5 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -24,6 +24,7 @@ Multi-document APIs:: * <<{upid}-update-by-query>> * <<{upid}-delete-by-query>> * <<{upid}-rethrottle>> +* <<{upid}-multi-term-vectors>> include::document/index.asciidoc[] include::document/get.asciidoc[] @@ -37,6 +38,8 @@ include::document/reindex.asciidoc[] include::document/update-by-query.asciidoc[] include::document/delete-by-query.asciidoc[] include::document/rethrottle.asciidoc[] +include::document/multi-term-vectors.asciidoc[] + == Search APIs @@ -263,13 +266,19 @@ The Java High Level REST Client supports the following Machine Learning APIs: * <<{upid}-get-categories>> * <<{upid}-get-calendars>> * <<{upid}-put-calendar>> +* <<{upid}-get-calendar-events>> +* <<{upid}-post-calendar-event>> +* <<{upid}-put-calendar-job>> +* <<{upid}-delete-calendar-job>> * <<{upid}-delete-calendar>> * <<{upid}-put-filter>> * <<{upid}-get-filters>> -* <<{upid}-delete-model-snapshot>> * <<{upid}-update-filter>> * <<{upid}-delete-filter>> * <<{upid}-get-model-snapshots>> +* <<{upid}-delete-model-snapshot>> +* <<{upid}-revert-model-snapshot>> +* <<{upid}-update-model-snapshot>> include::ml/put-job.asciidoc[] include::ml/get-job.asciidoc[] @@ -297,13 +306,19 @@ include::ml/get-influencers.asciidoc[] include::ml/get-categories.asciidoc[] include::ml/get-calendars.asciidoc[] include::ml/put-calendar.asciidoc[] +include::ml/get-calendar-events.asciidoc[] +include::ml/post-calendar-event.asciidoc[] +include::ml/put-calendar-job.asciidoc[] +include::ml/delete-calendar-job.asciidoc[] include::ml/delete-calendar.asciidoc[] include::ml/put-filter.asciidoc[] -include::ml/get-model-snapshots.asciidoc[] include::ml/get-filters.asciidoc[] -include::ml/delete-model-snapshot.asciidoc[] include::ml/update-filter.asciidoc[] include::ml/delete-filter.asciidoc[] +include::ml/get-model-snapshots.asciidoc[] +include::ml/delete-model-snapshot.asciidoc[] +include::ml/revert-model-snapshot.asciidoc[] +include::ml/update-model-snapshot.asciidoc[] == Migration APIs @@ -352,6 +367,7 @@ The Java High Level REST Client supports the following Security APIs: * <<{upid}-clear-roles-cache>> * <<{upid}-clear-realm-cache>> * <<{upid}-authenticate>> +* <<{upid}-has-privileges>> * <> * <> * <> @@ -369,6 +385,7 @@ include::security/delete-privileges.asciidoc[] include::security/clear-roles-cache.asciidoc[] include::security/clear-realm-cache.asciidoc[] include::security/authenticate.asciidoc[] +include::security/has-privileges.asciidoc[] include::security/get-certificates.asciidoc[] include::security/put-role-mapping.asciidoc[] include::security/get-role-mappings.asciidoc[] @@ -430,9 +447,13 @@ The Java High Level REST Client supports the following CCR APIs: * <<{upid}-ccr-put-follow>> * <<{upid}-ccr-pause-follow>> +* <<{upid}-ccr-resume-follow>> +* <<{upid}-ccr-unfollow>> include::ccr/put_follow.asciidoc[] include::ccr/pause_follow.asciidoc[] +include::ccr/resume_follow.asciidoc[] +include::ccr/unfollow.asciidoc[] == Index Lifecycle Management APIs @@ -443,8 +464,18 @@ The Java High Level REST Client supports the following Index Lifecycle Management APIs: * <<{upid}-ilm-put-lifecycle-policy>> +* <<{upid}-ilm-delete-lifecycle-policy>> * <<{upid}-ilm-get-lifecycle-policy>> +* <<{upid}-ilm-start-ilm>> +* <<{upid}-ilm-stop-ilm>> +* <<{upid}-ilm-status>> +* 
<<{upid}-ilm-retry-lifecycle-policy>> + include::ilm/put_lifecycle_policy.asciidoc[] +include::ilm/delete_lifecycle_policy.asciidoc[] include::ilm/get_lifecycle_policy.asciidoc[] - +include::ilm/start_lifecycle_management.asciidoc[] +include::ilm/stop_lifecycle_management.asciidoc[] +include::ilm/lifecycle_management_status.asciidoc[] +include::ilm/retry_lifecycle_policy.asciidoc[] diff --git a/docs/plugins/analysis-icu.asciidoc b/docs/plugins/analysis-icu.asciidoc index 35265140533ed..b48bb35d191da 100644 --- a/docs/plugins/analysis-icu.asciidoc +++ b/docs/plugins/analysis-icu.asciidoc @@ -26,6 +26,24 @@ characters. :plugin_name: analysis-icu include::install_remove.asciidoc[] +[[analysis-icu-analyzer]] +==== ICU Analyzer + +Performs basic normalization, tokenization and character folding, using the +`icu_normalizer` char filter, `icu_tokenizer` and `icu_normalizer` token filter + +The following parameters are accepted: + +[horizontal] + +`method`:: + + Normalization method. Accepts `nfkc`, `nfc` or `nfkc_cf` (default) + +`mode`:: + + Normalization mode. Accepts `compose` (default) or `decompose`. + [[analysis-icu-normalization-charfilter]] ==== ICU Normalization Character Filter diff --git a/docs/reference/aggregations/bucket/significanttext-aggregation.asciidoc b/docs/reference/aggregations/bucket/significanttext-aggregation.asciidoc index 1541b46f4c3d1..12d1e5e6216ae 100644 --- a/docs/reference/aggregations/bucket/significanttext-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/significanttext-aggregation.asciidoc @@ -1,8 +1,6 @@ [[search-aggregations-bucket-significanttext-aggregation]] === Significant Text Aggregation -experimental[] - An aggregation that returns interesting or unusual occurrences of free-text terms in a set. It is like the <> aggregation but differs in that: @@ -27,8 +25,6 @@ The significant words are the ones that have undergone a significant change in p If the term "H5N1" only exists in 5 documents in a 10 million document index and yet is found in 4 of the 100 documents that make up a user's search results that is significant and probably very relevant to their search. 5/10,000,000 vs 4/100 is a big swing in frequency. -experimental[The `significant_text` aggregation is new and may change in non-backwards compatible ways if we add further text-analysis features e.g. phrase detection] - ==== Basic use In the typical use case, the _foreground_ set of interest is a selection of the top-matching search results for a query diff --git a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc index 78f7607b1e443..9a4dcbe8aaac7 100644 --- a/docs/reference/analysis/analyzers/lang-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/lang-analyzer.asciidoc @@ -421,6 +421,9 @@ PUT /catalan_example [[cjk-analyzer]] ===== `cjk` analyzer +NOTE: You may find that `icu_analyzer` in the ICU analysis plugin works better +for CJK text than the `cjk` analyzer. Experiment with your text and queries. + The `cjk` analyzer could be reimplemented as a `custom` analyzer as follows: [source,js] diff --git a/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc index d9e09f158c494..911f0801cb3c7 100644 --- a/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/delete-auto-follow-pattern.asciidoc @@ -12,7 +12,8 @@ Delete auto-follow patterns. 
==== Description -This API deletes a configured auto-follow pattern collection. +This API deletes a configured collection of +{stack-ov}/ccr-auto-follow.html[auto-follow patterns]. ==== Request diff --git a/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc index ba32a1ee49a67..253087776dc06 100644 --- a/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/get-auto-follow-pattern.asciidoc @@ -12,8 +12,8 @@ Get auto-follow patterns. ==== Description -This API gets configured auto-follow patterns. This API will return the -specified auto-follow pattern collection. +This API gets configured {stack-ov}/ccr-auto-follow.html[auto-follow patterns]. +This API will return the specified auto-follow pattern collection. ==== Request diff --git a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc index f4b53382bda4a..f84c36be85358 100644 --- a/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc +++ b/docs/reference/ccr/apis/auto-follow/put-auto-follow-pattern.asciidoc @@ -12,10 +12,11 @@ Creates an auto-follow pattern. ==== Description -This API creates a new named collection of auto-follow patterns against the -remote cluster specified in the request body. Newly created indices on the -remote cluster matching any of the specified patterns will be automatically -configured as follower indices. +This API creates a new named collection of +{stack-ov}/ccr-auto-follow.html[auto-follow patterns] against the remote cluster +specified in the request body. Newly created indices on the remote cluster +matching any of the specified patterns will be automatically configured as follower +indices. ==== Request diff --git a/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc b/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc index 459f13c8a31c2..e330701aaf397 100644 --- a/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-pause-follow.asciidoc @@ -48,6 +48,14 @@ POST //_ccr/pause_follow `follower_index` (required):: (string) the name of the follower index + +==== Authorization + +If the {es} {security-features} are enabled, you must have `manage_ccr` cluster +privileges on the cluster that contains the follower index. For more information, +see {stack-ov}/security-privileges.html[Security privileges]. + + ==== Example This example pauses a follower index named `follower_index`: diff --git a/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc b/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc index 2595d4e0a197e..55da1b0cbd4ca 100644 --- a/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/post-resume-follow.asciidoc @@ -63,6 +63,14 @@ POST //_ccr/resume_follow ==== Request Body include::../follow-request-body.asciidoc[] +==== Authorization + +If the {es} {security-features} are enabled, you must have `write` and `monitor` +index privileges for the follower index. You must have `read` and `monitor` +index privileges for the leader index. You must also have `manage_ccr` cluster +privileges on the cluster that contains the follower index. For more information, +see {stack-ov}/security-privileges.html[Security privileges]. 
+ ==== Example This example resumes a follower index named `follower_index`: diff --git a/docs/reference/ccr/apis/follow/put-follow.asciidoc b/docs/reference/ccr/apis/follow/put-follow.asciidoc index 9e6106a6a774d..6386a2b09a5fc 100644 --- a/docs/reference/ccr/apis/follow/put-follow.asciidoc +++ b/docs/reference/ccr/apis/follow/put-follow.asciidoc @@ -58,6 +58,15 @@ PUT //_ccr/follow include::../follow-request-body.asciidoc[] +==== Authorization + +If the {es} {security-features} are enabled, you must have `write`, `monitor`, +and `manage_follow_index` index privileges for the follower index. You must have +`read` and `monitor` index privileges for the leader index. You must also have +`manage_ccr` cluster privileges on the cluster that contains the follower index. +For more information, see +{stack-ov}/security-privileges.html[Security privileges]. + ==== Example This example creates a follower index named `follower_index`: diff --git a/docs/reference/ccr/apis/get-ccr-stats.asciidoc b/docs/reference/ccr/apis/get-ccr-stats.asciidoc index 630082fdc9bab..2a452e2befde6 100644 --- a/docs/reference/ccr/apis/get-ccr-stats.asciidoc +++ b/docs/reference/ccr/apis/get-ccr-stats.asciidoc @@ -3,7 +3,7 @@ [[ccr-get-stats]] === Get Cross-Cluster Replication Stats API ++++ -Get Follower Stats +Get CCR Stats ++++ beta[] diff --git a/docs/reference/ccr/auto-follow.asciidoc b/docs/reference/ccr/auto-follow.asciidoc index 443af594df86a..a7f4b95f42202 100644 --- a/docs/reference/ccr/auto-follow.asciidoc +++ b/docs/reference/ccr/auto-follow.asciidoc @@ -1,6 +1,6 @@ [role="xpack"] [testenv="platinum"] -[[ccr-overview-auto-follow]] +[[ccr-auto-follow]] === Automatically following indices beta[] diff --git a/docs/reference/ccr/getting-started.asciidoc b/docs/reference/ccr/getting-started.asciidoc index 51d93227be20c..c87ab3b03867e 100644 --- a/docs/reference/ccr/getting-started.asciidoc +++ b/docs/reference/ccr/getting-started.asciidoc @@ -278,11 +278,11 @@ POST /server-metrics-copy/_ccr/unfollow [[ccr-getting-started-auto-follow]] === Automatically create follower indices -The auto-follow feature in {ccr} helps for time series use cases where you want -to follow new indices that are periodically created in the remote cluster -(such as daily Beats indices). Auto-following is configured using the -{ref}/ccr-put-auto-follow-pattern.html[create auto-follow pattern API]. With an -auto-follow pattern, you reference the +The <> feature in {ccr} helps for time series use +cases where you want to follow new indices that are periodically created in the +remote cluster (such as daily Beats indices). Auto-following is configured using +the {ref}/ccr-put-auto-follow-pattern.html[create auto-follow pattern API]. With +an auto-follow pattern, you reference the <> that you connected your local cluster to. You must also specify a collection of patterns that match the indices you want to automatically follow. diff --git a/docs/reference/ccr/index.asciidoc b/docs/reference/ccr/index.asciidoc index aefa0ec7e8b6b..be281d05c05f3 100644 --- a/docs/reference/ccr/index.asciidoc +++ b/docs/reference/ccr/index.asciidoc @@ -9,7 +9,8 @@ beta[] The {ccr} (CCR) feature enables replication of indices in remote clusters to a -local cluster. This functionality can be used in some common production use cases: +local cluster. This functionality can be used in some common production use +cases: * Disaster recovery in case a primary cluster fails. 
A secondary cluster can serve as a hot backup @@ -19,7 +20,7 @@ This guide provides an overview of {ccr}: * <> * <> -* <> +* <> * <> -- diff --git a/docs/reference/ccr/overview.asciidoc b/docs/reference/ccr/overview.asciidoc index 633da63288bba..0ad9039d8710d 100644 --- a/docs/reference/ccr/overview.asciidoc +++ b/docs/reference/ccr/overview.asciidoc @@ -27,7 +27,7 @@ Replication can be configured in two ways: {ref}/ccr-put-follow.html[create follower API] * Automatically using - <> + <> NOTE: You must also <>. diff --git a/docs/reference/frozen-indices.asciidoc b/docs/reference/frozen-indices.asciidoc new file mode 100644 index 0000000000000..28264547575b7 --- /dev/null +++ b/docs/reference/frozen-indices.asciidoc @@ -0,0 +1,56 @@ +[role="xpack"] +[testenv="basic"] +[[frozen-indices]] += Frozen Indices + +[partintro] +-- +Elasticsearch indices can require a significant amount of memory available in order to be open and searchable. Yet, not all indices need +to be writable at the same time and have different access patterns over time. For example, indices in the time series or logging use cases +are unlikely to be queried once they age out but still need to be kept around for retention policy purposes. + +In order to keep indices available and queryable for a longer period but at the same time reduce their hardware requirements they can be transitioned +into a frozen state. Once an index is frozen, all of its transient shard memory (aside from mappings and analyzers) +is moved to persistent storage. This allows for a much higher disk to heap storage ratio on individual nodes. Once an index is +frozen, it is made read-only and drops its transient data structures from memory. These data structures will need to be reloaded on demand (and subsequently dropped) for each search request that targets the frozen index. A search request that hits +one or more frozen shards will be executed on a throttled threadpool that ensures that we never search more than +`N` (`1` by default) searches concurrently (see <>). This protects nodes from exceeding the available memory due to incoming search requests. + +In contrast to ordinary open indices, frozen indices are expected to execute slowly and are not designed for high query load. Parallelism is +gained only on a per-node level and loading data-structures on demand is expected to be one or more orders of a magnitude slower than query +execution on a per shard level. Depending on the data in an index, a frozen index may execute searches in the seconds to minutes range, when the same index in an unfrozen state may execute the same search request in milliseconds. +-- + +== Best Practices + +Since frozen indices provide a much higher disk to heap ratio at the expense of search latency, it is advisable to allocate frozen indices to +dedicated nodes to prevent searches on frozen indices influencing traffic on low latency nodes. There is significant overhead in loading +data structures on demand which can cause page faults and garbage collections, which further slow down query execution. + +Since indices that are eligible for freezing are unlikely to change in the future, disk space can be optimized as described in <>. + +== Searching a frozen index + +Frozen indices are throttled in order to limit memory consumptions per node. The number of concurrently loaded frozen indices per node is +limited by the number of threads in the <> threadpool, which is `1` by default. 
+Search requests will not be executed against frozen indices by default, even if a frozen index is named explicitly. This is +to prevent accidental slowdowns by targeting a frozen index by mistake. To include frozen indices a search request must be executed with +the query parameter `ignore_throttled=false`. + +[source,js] +-------------------------------------------------- +GET /twitter/_search?q=user:kimchy&ignore_throttled=false +-------------------------------------------------- +// CONSOLE +// TEST[setup:twitter] + +[IMPORTANT] +================================ +While frozen indices are slow to search, they can be pre-filtered efficiently. The request parameter `pre_filter_shard_size` specifies +a threshold that, when exceeded, will enforce a round-trip to pre-filter search shards that cannot possibly match. +This filter phase can limit the number of shards significantly. For instance, if a date range filter is applied, then all indices (frozen or unfrozen) that do not contain documents within the date range can be skipped efficiently. +The default value for `pre_filter_shard_size` is `128` but it's recommended to set it to `1` when searching frozen indices. There is no +significant overhead associated with this pre-filter phase. +================================ + + diff --git a/docs/reference/ilm/get-index-lifecycle-information.asciidoc b/docs/reference/ilm/get-index-lifecycle-information.asciidoc deleted file mode 100644 index e30a0beb872a1..0000000000000 --- a/docs/reference/ilm/get-index-lifecycle-information.asciidoc +++ /dev/null @@ -1,7 +0,0 @@ -[role="xpack"] -[[get-index-lifecycle-information]] -== Get index lifecycle information - -Execution Model -Discuss how actions are actually split up into discrete steps and how you can see more information about where an index is within a policy (info and all) -Talk about the jump-to-step API diff --git a/docs/reference/ilm/index.asciidoc b/docs/reference/ilm/index.asciidoc index b568203a6a220..dcee0c06f4255 100644 --- a/docs/reference/ilm/index.asciidoc +++ b/docs/reference/ilm/index.asciidoc @@ -62,8 +62,6 @@ include::using-policies-rollover.asciidoc[] include::update-lifecycle-policy.asciidoc[] -// include::get-index-lifecycle-information.asciidoc[] - include::error-handling.asciidoc[] include::start-stop-ilm.asciidoc[] diff --git a/docs/reference/ilm/policy-definitions.asciidoc b/docs/reference/ilm/policy-definitions.asciidoc index 4c083511be3ec..7b7008d613fb3 100644 --- a/docs/reference/ilm/policy-definitions.asciidoc +++ b/docs/reference/ilm/policy-definitions.asciidoc @@ -71,6 +71,9 @@ index is rolled over, then `min_age` is the time elapsed from the time the index is rolled over. The intention here is to execute following phases and actions relative to when data was written last to a rolled over index. +The previous phase's actions must complete before {ILM} will check `min_age` and +transition into the next phase. + === Actions beta[] @@ -449,6 +452,41 @@ PUT _ilm/policy/my_policy // CONSOLE +===== Example: Rollover condition stalls phase transition + +The Rollover action will only complete once one of its conditions is +met. This means that any proceeding phases will be blocked until Rollover +succeeds. 
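For readers following along from a client application, the `rollover_policy` shown above can also be installed over HTTP. This is only a sketch, assuming the low-level Java REST client and a node on `localhost:9200`; the JSON body mirrors the console example in this hunk:

[source,java]
----
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

public class CreateRolloverPolicy {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request request = new Request("PUT", "/_ilm/policy/rollover_policy");
            // Same policy as the console example: roll over at 50GB, delete one day after rollover.
            request.setJsonEntity(
                "{ \"policy\": { \"phases\": {"
                    + " \"hot\": { \"actions\": { \"rollover\": { \"max_size\": \"50G\" } } },"
                    + " \"delete\": { \"min_age\": \"1d\", \"actions\": { \"delete\": {} } }"
                    + " } } }");
            System.out.println(client.performRequest(request).getStatusLine());
        }
    }
}
----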
+ +[source,js] +-------------------------------------------------- +PUT /_ilm/policy/rollover_policy +{ + "policy": { + "phases": { + "hot": { + "actions": { + "rollover": { + "max_size": "50G" + } + } + }, + "delete": { + "min_age": "1d", + "actions": { + "delete": {} + } + } + } + } +} +-------------------------------------------------- +// CONSOLE + +The above example illustrates a policy which attempts to delete an +index one day after the index has been rolled over. It does not +delete the index one day after it has been created. + [[ilm-shrink-action]] ==== Shrink diff --git a/docs/reference/index-modules/slowlog.asciidoc b/docs/reference/index-modules/slowlog.asciidoc index da13cd7decf52..9bad81fbff39e 100644 --- a/docs/reference/index-modules/slowlog.asciidoc +++ b/docs/reference/index-modules/slowlog.asciidoc @@ -102,13 +102,8 @@ appender.index_indexing_slowlog_rolling.type = RollingFile appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs}_index_indexing_slowlog.log appender.index_indexing_slowlog_rolling.layout.type = PatternLayout -<<<<<<< HEAD -appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.10000m%n -appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs}_index_indexing_slowlog-%d{yyyy-MM-dd}.log -======= appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] [%node_name]%marker %.-10000m%n -appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_index_indexing_slowlog-%i.log.gz ->>>>>>> 22459576d75... Logging: Make node name consistent in logger (#31588) +appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs}_index_indexing_slowlog-%d{yyyy-MM-dd}.log appender.index_indexing_slowlog_rolling.policies.type = Policies appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy appender.index_indexing_slowlog_rolling.policies.time.interval = 1 diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index 6f80d95079e26..e6d7a979f808c 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -65,6 +65,8 @@ include::monitoring/index.asciidoc[] include::rollup/index.asciidoc[] +include::frozen-indices.asciidoc[] + include::rest-api/index.asciidoc[] include::commands/index.asciidoc[] diff --git a/docs/reference/indices/apis/freeze.asciidoc b/docs/reference/indices/apis/freeze.asciidoc new file mode 100644 index 0000000000000..5ca9ecbc6b801 --- /dev/null +++ b/docs/reference/indices/apis/freeze.asciidoc @@ -0,0 +1,50 @@ +[role="xpack"] +[testenv="basic"] +[[freeze-index-api]] +== Freeze Index API +++++ +Freeze Index +++++ + +Freezes an index. + +[float] +=== Request + +`POST //_freeze` + +[float] +=== Description + +A frozen index has almost no overhead on the cluster (except +for maintaining its metadata in memory), and is blocked for write operations. +See <> and <>. 
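As a client-side counterpart to the console example further down, the same freeze and unfreeze calls can be issued from Java. A minimal sketch, assuming the low-level REST client, a node on `localhost:9200`, and a pre-existing index named `my_index` (all illustrative):

[source,java]
----
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

public class FreezeIndexExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Freeze the index; it becomes read-only and drops its transient shard memory.
            client.performRequest(new Request("POST", "/my_index/_freeze"));
            // Later, unfreeze it to make it writeable again via the normal recovery process.
            client.performRequest(new Request("POST", "/my_index/_unfreeze"));
        }
    }
}
----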
+ +[float] +=== Path Parameters + +`index` (required):: +(string) Identifier for the index + +//=== Query Parameters + +//=== Authorization + +[float] +=== Examples + +The following example freezes and unfreezes an index: + +[source,js] +-------------------------------------------------- +POST /my_index/_freeze +POST /my_index/_unfreeze +-------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT my_index\n/] + +[IMPORTANT] +================================ + Freezing an index will close the index and reopen it within the same API call. This causes primaries to not be allocated for a short + amount of time and causes the cluster to go red until the primaries are allocated again. This limitation might be removed in the future. +================================ diff --git a/docs/reference/indices/apis/unfreeze.asciidoc b/docs/reference/indices/apis/unfreeze.asciidoc new file mode 100644 index 0000000000000..4a01813463516 --- /dev/null +++ b/docs/reference/indices/apis/unfreeze.asciidoc @@ -0,0 +1,50 @@ +[role="xpack"] +[testenv="basic"] +[[unfreeze-index-api]] +== Unfreeze Index API +++++ +Unfreeze Index +++++ + +Unfreezes an index. + +[float] +=== Request + +`POST //_unfreeze` + +[float] +=== Description + +When a frozen index is unfrozen, the index goes through the normal recovery +process and becomes writeable again. See <> and <>. + +[float] +=== Path Parameters + +`index` (required):: +(string) Identifier for the index + + +//=== Query Parameters + +//=== Authorization + +[float] +=== Examples + +The following example freezes and unfreezes an index: + +[source,js] +-------------------------------------------------- +POST /my_index/_freeze +POST /my_index/_unfreeze +-------------------------------------------------- +// CONSOLE +// TEST[s/^/PUT my_index\n/] + +[IMPORTANT] +================================ + Freezing an index will close the index and reopen it within the same API call. This causes primaries to not be allocated for a short + amount of time and causes the cluster to go red until the primaries are allocated again. This limitation might be removed in the future. +================================ diff --git a/docs/reference/mapping/params/multi-fields.asciidoc b/docs/reference/mapping/params/multi-fields.asciidoc index 1489304bbaa60..2f973a3f62b3a 100644 --- a/docs/reference/mapping/params/multi-fields.asciidoc +++ b/docs/reference/mapping/params/multi-fields.asciidoc @@ -127,7 +127,7 @@ the second document. The `text.english` field contains `fox` for both documents, because `foxes` is stemmed to `fox`. The query string is also analyzed by the `standard` analyzer for the `text` -field, and by the `english` analyzer` for the `text.english` field. The +field, and by the `english` analyzer for the `text.english` field. The stemmed field allows a query for `foxes` to also match the document containing just `fox`. This allows us to match as many documents as possible. By also querying the unstemmed `text` field, we improve the relevance score of the diff --git a/docs/reference/ml/apis/datafeedresource.asciidoc b/docs/reference/ml/apis/datafeedresource.asciidoc index 73361b12454b9..9e85dbdba06bc 100644 --- a/docs/reference/ml/apis/datafeedresource.asciidoc +++ b/docs/reference/ml/apis/datafeedresource.asciidoc @@ -111,6 +111,7 @@ The configuration object has the following properties: (time units) The window of time before the latest finalized bucket that should be searched for late data. 
Defaults to `null` which causes an appropriate `check_window` to be calculated when the real-time {dfeed} runs. + The default `check_window` span calculation is the max between `2h` or `8 * bucket_span`. [float] [[ml-datafeed-counts]] diff --git a/docs/reference/modules/threadpool.asciidoc b/docs/reference/modules/threadpool.asciidoc index 9519dbf3e03a8..b018ac8281e4b 100644 --- a/docs/reference/modules/threadpool.asciidoc +++ b/docs/reference/modules/threadpool.asciidoc @@ -25,6 +25,10 @@ There are several thread pools, but the important ones include: `int((# of available_processors * 3) / 2) + 1`, and initial queue_size of `1000`. +[[search-throttled]]`search_throttled`:: + For count/search/suggest/get operations on `search_throttled indices`. Thread pool type is + `fixed_auto_queue_size` with a size of `1`, and initial queue_size of `100`. + `get`:: For get operations. Thread pool type is `fixed` with a size of `# of available processors`, diff --git a/docs/reference/query-dsl/query-string-query.asciidoc b/docs/reference/query-dsl/query-string-query.asciidoc index f047d75d7f1aa..e4b4fb50904b9 100644 --- a/docs/reference/query-dsl/query-string-query.asciidoc +++ b/docs/reference/query-dsl/query-string-query.asciidoc @@ -103,7 +103,10 @@ phrase matches are required. Default value is `0`. |`boost` |Sets the boost value of the query. Defaults to `1.0`. -|`auto_generate_phrase_queries` |Defaults to `false`. +|`auto_generate_phrase_queries` | Deprecated setting. This setting is ignored, +use [type=phrase] instead to make phrase queries out of all text that is +within query operators, or use explicitly quoted strings if you need +finer-grained control. |`analyze_wildcard` |By default, wildcards terms in a query string are not analyzed. By setting this value to `true`, a best effort will be @@ -153,8 +156,11 @@ When not explicitly specifying the field to search on in the query string syntax, the `index.query.default_field` will be used to derive which field to search on. If the `index.query.default_field` is not specified, the `query_string` will automatically attempt to determine the existing fields in the index's -mapping that are queryable, and perform the search on those fields. Note that this will not -include nested documents, use a nested query to search those documents. +mapping that are queryable, and perform the search on those fields. +This will not include nested documents, use a nested query to search those documents. + +NOTE: For mappings with a large number of fields, searching across all queryable +fields in the mapping could be expensive. [float] ==== Multi Field diff --git a/docs/reference/query-dsl/script-query.asciidoc b/docs/reference/query-dsl/script-query.asciidoc index a2bb49212034e..917991e3211c6 100644 --- a/docs/reference/query-dsl/script-query.asciidoc +++ b/docs/reference/query-dsl/script-query.asciidoc @@ -11,7 +11,7 @@ GET /_search { "query": { "bool" : { - "must" : { + "filter" : { "script" : { "script" : { "source": "doc['num1'].value > 1", @@ -38,7 +38,7 @@ GET /_search { "query": { "bool" : { - "must" : { + "filter" : { "script" : { "script" : { "source" : "doc['num1'].value > params.param1", diff --git a/docs/reference/release-notes.asciidoc b/docs/reference/release-notes.asciidoc index 9b7c776f8b039..ceb0b85cd5bde 100644 --- a/docs/reference/release-notes.asciidoc +++ b/docs/reference/release-notes.asciidoc @@ -5,6 +5,7 @@ -- This section summarizes the changes in each release. 
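The script-query change above (in docs/reference/query-dsl/script-query.asciidoc) moves the script clause from `must` into `filter` context, where no score needs to be computed. A hedged sketch of the equivalent query built with `QueryBuilders`; the field name `num1` comes from the docs example, everything else is illustrative:

[source,java]
----
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.script.Script;
import org.elasticsearch.search.builder.SearchSourceBuilder;

public class ScriptFilterExample {
    public static void main(String[] args) {
        // Script queries do not produce a meaningful score, so filter context is the better fit.
        BoolQueryBuilder query = QueryBuilders.boolQuery()
            .filter(QueryBuilders.scriptQuery(new Script("doc['num1'].value > 1")));
        SearchSourceBuilder source = new SearchSourceBuilder().query(query);
        System.out.println(source); // prints the JSON that would be sent in a search request
    }
}
----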
+* <> * <> * <> * <> diff --git a/docs/reference/release-notes/6.5.asciidoc b/docs/reference/release-notes/6.5.asciidoc index 2a3d95fbe7649..2b35126a8c701 100644 --- a/docs/reference/release-notes/6.5.asciidoc +++ b/docs/reference/release-notes/6.5.asciidoc @@ -44,11 +44,49 @@ // === Known Issues //// -[[release-notes-6.5.0]] -== 6.5.0 Release Notes +[[release-notes-6.5.1]] +== {es} version 6.5.1 + +Also see <>. + +[float] +[[enhancement-6.5.1]] +=== Enhancements + +Authorization:: +* Grant .tasks access to kibana_system role {pull}35573[#35573] + +Search:: +* has_parent builder: exception message/param fix {pull}31182[#31182] + +[float] +[[bug-6.5.1]] +=== Bug fixes + +Aggregations:: +* Correct implemented interface of ParsedReverseNested {pull}35455[#35455] (issue: {issue}35449[#35449]) +* Handle IndexOrDocValuesQuery in composite aggregation {pull}35392[#35392] +* Preserve `format` when aggregation contains unmapped date fields {pull}35254[#35254] (issue: {issue}31760[#31760]) + +Infra/Core:: +* Upgrade to Joda 2.10.1 {pull}35410[#35410] (issue: {issue}33749[#33749]) + +Machine Learning:: +* Fix find_file_structure NPE with should_trim_fields {pull}35465[#35465] (issue: {issue}35462[#35462]) +* Prevent notifications being created on deletion of a non existent job {pull}35337[#35337] (issues: {issue}34058[#34058], {issue}35336[#35336]) +SQL:: +* Fix query translation for scripted queries {pull}35408[#35408] (issue: {issue}35232[#35232]) +* Clear the cursor if nested inner hits are enough to fulfill the query required limits {pull}35398[#35398] (issue: {issue}35176[#35176]) +* SQL: Introduce IsNull node to simplify expressions {pull}35206[#35206] (issues: {issue}34876[#34876], {issue}35171[#35171]) + +Scripting:: +* [Painless] Partially fixes def boxed types casting {pull}35563[#35563] (issue: {issue}35351[#35351]) + +[[release-notes-6.5.0]] +== {es} version 6.5.0 -Also see <>. +Also see <>. [[breaking-6.5.0]] [float] diff --git a/docs/reference/release-notes/highlights-6.5.0.asciidoc b/docs/reference/release-notes/highlights-6.5.0.asciidoc index 14ab65af307fc..e3499abc1a2d0 100644 --- a/docs/reference/release-notes/highlights-6.5.0.asciidoc +++ b/docs/reference/release-notes/highlights-6.5.0.asciidoc @@ -78,7 +78,18 @@ currently the same as JDBC. An alpha version of the ODBC client is now available for download. [float] -=== Cross-cluster replication (beta) +=== Delegate authorization to other realms + +If you enable the {es} {security-features}, some realms now have the +ability to perform _authentication_ internally then delegate _authorization_ to +another realm. For example, you could authenticate using PKI then delegate to an +LDAP realm for role information. The realms that support this feature have a +new `authorization_realms` setting that you can configure in the +`elasticsearch.yml` file. For more information, see +{stack-ov}/realm-chains.html#authorization_realms[Realm chains] and <>. + +[float] +=== Cross-cluster replication (beta^*^) Cross-cluster replication enables you to replicate indices that exist in remote clusters to your local cluster. You create an index in your local cluster @@ -93,7 +104,7 @@ For more information, see {stack-ov}/xpack-ccr.html[Cross-cluster replication] and <>. [float] -=== Monitor {es} with {metricbeat} (beta) +=== Monitor {es} with {metricbeat} (beta^*^) In 6.4 and later, you can use {metricbeat} to collect data about {kib} and ship it directly to your monitoring cluster, rather than routing it through {es}. 
Now @@ -101,14 +112,9 @@ in 6.5, you can also use {metricbeat} to collect and ship data about {es}. If you are monitoring {ls} or Beats, at this time you must still use exporters to route the data. See <> and {stack-ov}/how-monitoring-works.html[How monitoring works]. + -[float] -=== Delegate authorization to other realms - -If you enable the {es} {security-features}, some realms now have the -ability to perform _authentication_ internally then delegate _authorization_ to -another realm. For example, you could authenticate using PKI then delegate to an -LDAP realm for role information. The realms that support this feature have a -new `authorization_realms` setting that you can configure in the -`elasticsearch.yml` file. For more information, see -{stack-ov}/realm-chains.html#authorization_realms[Realm chains] and <>. +^*^ This functionality is in beta and is subject to change. The design and code +is less mature than official GA features and is being provided as-is with no +warranties. Please try this functionality in your test and development environments +and provide feedback in the https://discuss.elastic.co/[Elastic community forums]. diff --git a/docs/reference/rest-api/index.asciidoc b/docs/reference/rest-api/index.asciidoc index eedc2dfa1f51f..e834249724aa3 100644 --- a/docs/reference/rest-api/index.asciidoc +++ b/docs/reference/rest-api/index.asciidoc @@ -10,6 +10,7 @@ directly to configure and access {xpack} features. * <> * <> * <> +* <>, <> * <> * <> * <> @@ -23,11 +24,13 @@ directly to configure and access {xpack} features. include::info.asciidoc[] include::{es-repo-dir}/ccr/apis/ccr-apis.asciidoc[] include::{es-repo-dir}/graph/explore.asciidoc[] +include::{es-repo-dir}/indices/apis/freeze.asciidoc[] include::{es-repo-dir}/ilm/apis/ilm-api.asciidoc[] include::{es-repo-dir}/licensing/index.asciidoc[] include::{es-repo-dir}/migration/migration.asciidoc[] include::{es-repo-dir}/ml/apis/ml-api.asciidoc[] include::{es-repo-dir}/rollup/rollup-api.asciidoc[] include::{xes-repo-dir}/rest-api/security.asciidoc[] +include::{es-repo-dir}/indices/apis/unfreeze.asciidoc[] include::{xes-repo-dir}/rest-api/watcher.asciidoc[] include::defs.asciidoc[] diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index a12b27151c0a8..143345b1dcec0 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -94,11 +94,10 @@ The `vm.max_map_count` setting must be set within the xhyve virtual machine: ["source","sh"] -------------------------------------------- -$ screen ~/Library/Containers/com.docker.docker/Data/com.docker.driver.amd64-linux/tty +$ screen ~/Library/Containers/com.docker.docker/Data/vms/0/tty -------------------------------------------- -Log in with 'root' and no password. -Then configure the `sysctl` setting as you would for Linux: +Just press enter and configure the `sysctl` setting as you would for Linux: ["source","sh"] -------------------------------------------- diff --git a/docs/reference/sql/functions/conditional.asciidoc b/docs/reference/sql/functions/conditional.asciidoc new file mode 100644 index 0000000000000..6879c4816b48e --- /dev/null +++ b/docs/reference/sql/functions/conditional.asciidoc @@ -0,0 +1,46 @@ +[role="xpack"] +[testenv="basic"] +[[sql-functions-conditional]] +=== Conditional Functions + +Functions that return one of their arguments by evaluating in an if-else manner. 
+ +[[sql-functions-conditional-coalesce]] +==== `COALESCE` + +.Synopsis +[source, sql] +---- +COALESCE ( expression<1>, expression<2>, ... ) +---- + +*Input*: + +<1> 1st expression + +<2> 2nd expression + +... + +**N**th expression + +COALESCE can take an arbitrary number of arguments. + +*Output*: one of the expressions or `null` + +.Description + +Returns the first of its arguments that is not null. +If all arguments are null, then it returns `null`. + + + +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs.csv-spec[coalesceReturnNonNull] +---- + +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs.csv-spec[coalesceReturnNull] +---- diff --git a/docs/reference/sql/functions/index.asciidoc b/docs/reference/sql/functions/index.asciidoc index 82e8154de93c9..97115748fe982 100644 --- a/docs/reference/sql/functions/index.asciidoc +++ b/docs/reference/sql/functions/index.asciidoc @@ -12,6 +12,7 @@ * <> * <> * <> +* <> include::operators.asciidoc[] include::aggs.asciidoc[] @@ -20,3 +21,4 @@ include::search.asciidoc[] include::math.asciidoc[] include::string.asciidoc[] include::type-conversion.asciidoc[] +include::conditional.asciidoc[] diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java index 5b8e6adeb5b5d..5ec480c1bad49 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/TransportUpdateByQueryAction.java @@ -95,8 +95,8 @@ static class AsyncIndexBySearchAction extends AbstractAsyncBulkByScrollAction */ public class URLRepository extends BlobStoreRepository { + private static final Logger logger = LogManager.getLogger(URLRepository.class); public static final String TYPE = "url"; diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index 5373f09a48d92..9ceaa8a6d3430 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -42,8 +42,8 @@ import io.netty.handler.codec.http.HttpResponseEncoder; import io.netty.handler.timeout.ReadTimeoutException; import io.netty.handler.timeout.ReadTimeoutHandler; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.ExceptionsHelper; diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index f2c6c7496ee2c..f26e55d7ac647 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -36,6 +36,8 @@ import io.netty.channel.socket.nio.NioSocketChannel; import io.netty.util.AttributeKey; import io.netty.util.concurrent.Future; +import org.apache.logging.log4j.LogManager; +import 
org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; @@ -73,6 +75,7 @@ * sending out ping requests to other nodes. */ public class Netty4Transport extends TcpTransport { + private static final Logger logger = LogManager.getLogger(Netty4Transport.class); static { Netty4Utils.setup(); diff --git a/modules/tribe/src/main/java/org/elasticsearch/tribe/TribeService.java b/modules/tribe/src/main/java/org/elasticsearch/tribe/TribeService.java index b9553ce6ca058..12411d70c2ebe 100644 --- a/modules/tribe/src/main/java/org/elasticsearch/tribe/TribeService.java +++ b/modules/tribe/src/main/java/org/elasticsearch/tribe/TribeService.java @@ -20,6 +20,7 @@ package org.elasticsearch.tribe; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.apache.lucene.util.BytesRef; @@ -94,6 +95,7 @@ * to propagate to the relevant cluster. */ public class TribeService extends AbstractLifecycleComponent { + private static final Logger logger = LogManager.getLogger(TribeService.class); public static final ClusterBlock TRIBE_METADATA_BLOCK = new ClusterBlock(10, "tribe node, metadata not allowed", false, false, false, RestStatus.BAD_REQUEST, EnumSet.of(ClusterBlockLevel.METADATA_READ, ClusterBlockLevel.METADATA_WRITE)); diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuAnalyzerProvider.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuAnalyzerProvider.java new file mode 100644 index 0000000000000..80430e96b1e7a --- /dev/null +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/analysis/IcuAnalyzerProvider.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.analysis; + +import com.ibm.icu.text.Normalizer2; +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.Tokenizer; +import org.apache.lucene.analysis.icu.ICUFoldingFilter; +import org.apache.lucene.analysis.icu.ICUNormalizer2CharFilter; +import org.apache.lucene.analysis.icu.segmentation.ICUTokenizer; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexSettings; + +import java.io.Reader; + +public class IcuAnalyzerProvider extends AbstractIndexAnalyzerProvider { + + private final Normalizer2 normalizer; + + public IcuAnalyzerProvider(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + super(indexSettings, name, settings); + String method = settings.get("method", "nfkc_cf"); + String mode = settings.get("mode", "compose"); + if (!"compose".equals(mode) && !"decompose".equals(mode)) { + throw new IllegalArgumentException("Unknown mode [" + mode + "] in analyzer [" + name + + "], expected one of [compose, decompose]"); + } + Normalizer2 normalizer = Normalizer2.getInstance( + null, method, "compose".equals(mode) ? Normalizer2.Mode.COMPOSE : Normalizer2.Mode.DECOMPOSE); + this.normalizer = IcuNormalizerTokenFilterFactory.wrapWithUnicodeSetFilter(normalizer, settings); + } + + @Override + public Analyzer get() { + return new Analyzer() { + + @Override + protected Reader initReader(String fieldName, Reader reader) { + return new ICUNormalizer2CharFilter(reader, normalizer); + } + + @Override + protected TokenStreamComponents createComponents(String fieldName) { + Tokenizer source = new ICUTokenizer(); + return new TokenStreamComponents(source, new ICUFoldingFilter(source)); + } + }; + } +} diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/AnalysisICUPlugin.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/AnalysisICUPlugin.java index 58ebdc8e2a801..9d5ed5af7bdca 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/AnalysisICUPlugin.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/plugin/analysis/icu/AnalysisICUPlugin.java @@ -21,8 +21,11 @@ import static java.util.Collections.singletonMap; +import org.apache.lucene.analysis.Analyzer; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.index.analysis.AnalyzerProvider; import org.elasticsearch.index.analysis.CharFilterFactory; +import org.elasticsearch.index.analysis.IcuAnalyzerProvider; import org.elasticsearch.index.analysis.IcuCollationTokenFilterFactory; import org.elasticsearch.index.analysis.IcuFoldingTokenFilterFactory; import org.elasticsearch.index.analysis.IcuNormalizerCharFilterFactory; @@ -60,6 +63,11 @@ public Map> getTokenFilters() { return extra; } + @Override + public Map>> getAnalyzers() { + return singletonMap("icu_analyzer", IcuAnalyzerProvider::new); + } + @Override public Map> getTokenizers() { return singletonMap("icu_tokenizer", IcuTokenizerFactory::new); diff --git a/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/IcuAnalyzerTests.java b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/IcuAnalyzerTests.java new file mode 100644 index 0000000000000..d15c9524db18d --- /dev/null +++ b/plugins/analysis-icu/src/test/java/org/elasticsearch/index/analysis/IcuAnalyzerTests.java @@ -0,0 +1,96 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * 
license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.analysis; + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.BaseTokenStreamTestCase; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.plugin.analysis.icu.AnalysisICUPlugin; +import org.elasticsearch.test.IndexSettingsModule; + +import java.io.IOException; + +import static org.hamcrest.Matchers.containsString; + +public class IcuAnalyzerTests extends BaseTokenStreamTestCase { + + public void testMixedAlphabetTokenization() throws IOException { + + Settings settings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .build(); + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); + + String input = "안녕은하철도999극장판2.1981년8월8일.일본개봉작1999년재더빙video판"; + + AnalysisICUPlugin plugin = new AnalysisICUPlugin(); + Analyzer analyzer = plugin.getAnalyzers().get("icu_analyzer").get(idxSettings, null, "icu", settings).get(); + assertAnalyzesTo(analyzer, input, + new String[]{"안녕은하철도", "999", "극장판", "2.1981", "년", "8", "월", "8", "일", "일본개봉작", "1999", "년재더빙", "video", "판"}); + + } + + public void testMiddleDots() throws IOException { + Settings settings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .build(); + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); + + String input = "경승지·산악·협곡·해협·곶·심연·폭포·호수·급류"; + + Analyzer analyzer = new IcuAnalyzerProvider(idxSettings, null, "icu", settings).get(); + assertAnalyzesTo(analyzer, input, + new String[]{"경승지", "산악", "협곡", "해협", "곶", "심연", "폭포", "호수", "급류"}); + } + + public void testUnicodeNumericCharacters() throws IOException { + + Settings settings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .build(); + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); + + String input = "① ② ③ ⑴ ⑵ ⑶ ¼ ⅓ ⅜ ¹ ² ³ ₁ ₂ ₃"; + + Analyzer analyzer = new IcuAnalyzerProvider(idxSettings, null, "icu", settings).get(); + assertAnalyzesTo(analyzer, input, + new String[]{"1", "2", "3", "1", "2", "3", "1/4", "1/3", "3/8", "1", "2", "3", "1", "2", "3"}); + } + + public void testBadSettings() { + + Settings settings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put("mode", "wrong") + .build(); + IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> { + new IcuAnalyzerProvider(idxSettings, null, "icu", settings); + }); + + assertThat(e.getMessage(), containsString("Unknown mode 
[wrong] in analyzer [icu], expected one of [compose, decompose]")); + + } + +} diff --git a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/management/AzureComputeServiceImpl.java b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/management/AzureComputeServiceImpl.java index 4a922e36084e1..710dfa7e71df7 100644 --- a/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/management/AzureComputeServiceImpl.java +++ b/plugins/discovery-azure-classic/src/main/java/org/elasticsearch/cloud/azure/classic/management/AzureComputeServiceImpl.java @@ -33,6 +33,8 @@ import com.microsoft.windowsazure.management.compute.ComputeManagementService; import com.microsoft.windowsazure.management.compute.models.HostedServiceGetDetailedResponse; import com.microsoft.windowsazure.management.configuration.ManagementConfiguration; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.SpecialPermission; import org.elasticsearch.cloud.azure.classic.AzureServiceRemoteException; @@ -43,6 +45,8 @@ public class AzureComputeServiceImpl extends AbstractLifecycleComponent implements AzureComputeService { + private static final Logger logger = LogManager.getLogger(AzureComputeServiceImpl.class); + private final ComputeManagementClient client; private final String serviceName; diff --git a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceMetadataService.java b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceMetadataService.java index ca25fde742907..248d9a0447915 100644 --- a/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceMetadataService.java +++ b/plugins/discovery-gce/src/main/java/org/elasticsearch/cloud/gce/GceMetadataService.java @@ -30,12 +30,15 @@ import com.google.api.client.http.HttpHeaders; import com.google.api.client.http.HttpResponse; import com.google.api.client.http.HttpTransport; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cloud.gce.util.Access; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; public class GceMetadataService extends AbstractLifecycleComponent { + private static final Logger logger = LogManager.getLogger(GceMetadataService.class); // Forcing Google Token API URL as set in GCE SDK to // http://metadata/computeMetadata/v1/instance/service-accounts/default/token diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index 90f536385d523..6c8a4d20cd0d6 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -25,6 +25,8 @@ import org.elasticsearch.cloud.azure.blobstore.AzureBlobStore; import org.elasticsearch.cloud.azure.storage.AzureStorageService; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; @@ -60,6 +62,7 @@ * */ public class AzureRepository extends 
BlobStoreRepository { + private static final Logger logger = LogManager.getLogger(AzureRepository.class); public static final String TYPE = "azure"; diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java index 379fc10bc86f0..5e692945e58aa 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java @@ -19,8 +19,8 @@ package org.elasticsearch.repositories.gcs; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobPath; @@ -45,8 +45,7 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; class GoogleCloudStorageRepository extends BlobStoreRepository { - - private final Logger logger = LogManager.getLogger(GoogleCloudStorageRepository.class); + private static final Logger logger = LogManager.getLogger(GoogleCloudStorageRepository.class); private final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); // package private for testing diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java index 55641041a7051..f3eca07423dc6 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsRepository.java @@ -52,7 +52,7 @@ public final class HdfsRepository extends BlobStoreRepository { - private static final Logger LOGGER = LogManager.getLogger(HdfsRepository.class); + private static final Logger logger = LogManager.getLogger(HdfsRepository.class); private static final String CONF_SECURITY_PRINCIPAL = "security.principal"; @@ -104,7 +104,7 @@ private HdfsBlobStore createBlobstore(URI uri, String path, Settings repositoryS final Settings confSettings = repositorySettings.getByPrefix("conf."); for (String key : confSettings.keySet()) { - LOGGER.debug("Adding configuration to HDFS Client Configuration : {} = {}", key, confSettings.get(key)); + logger.debug("Adding configuration to HDFS Client Configuration : {} = {}", key, confSettings.get(key)); hadoopConfiguration.set(key, confSettings.get(key)); } @@ -158,7 +158,7 @@ private UserGroupInformation login(Configuration hadoopConfiguration, Settings r // Check to see if the authentication method is compatible if (kerberosPrincipal != null && authMethod.equals(AuthenticationMethod.SIMPLE)) { - LOGGER.warn("Hadoop authentication method is set to [SIMPLE], but a Kerberos principal is " + + logger.warn("Hadoop authentication method is set to [SIMPLE], but a Kerberos principal is " + "specified. 
Continuing with [KERBEROS] authentication."); SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, hadoopConfiguration); } else if (kerberosPrincipal == null && authMethod.equals(AuthenticationMethod.KERBEROS)) { @@ -171,15 +171,15 @@ private UserGroupInformation login(Configuration hadoopConfiguration, Settings r UserGroupInformation.setConfiguration(hadoopConfiguration); // Debugging - LOGGER.debug("Hadoop security enabled: [{}]", UserGroupInformation.isSecurityEnabled()); - LOGGER.debug("Using Hadoop authentication method: [{}]", SecurityUtil.getAuthenticationMethod(hadoopConfiguration)); + logger.debug("Hadoop security enabled: [{}]", UserGroupInformation.isSecurityEnabled()); + logger.debug("Using Hadoop authentication method: [{}]", SecurityUtil.getAuthenticationMethod(hadoopConfiguration)); // UserGroupInformation (UGI) instance is just a Hadoop specific wrapper around a Java Subject try { if (UserGroupInformation.isSecurityEnabled()) { String principal = preparePrincipal(kerberosPrincipal); String keytab = HdfsSecurityContext.locateKeytabFile(environment).toString(); - LOGGER.debug("Using kerberos principal [{}] and keytab located at [{}]", principal, keytab); + logger.debug("Using kerberos principal [{}] and keytab located at [{}]", principal, keytab); return UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keytab); } return UserGroupInformation.getCurrentUser(); @@ -200,7 +200,7 @@ private static String preparePrincipal(String originalPrincipal) { } if (originalPrincipal.equals(finalPrincipal) == false) { - LOGGER.debug("Found service principal. Converted original principal name [{}] to server principal [{}]", + logger.debug("Found service principal. Converted original principal name [{}] to server principal [{}]", originalPrincipal, finalPrincipal); } } diff --git a/server/build.gradle b/server/build.gradle index fb5487d030ac0..dac81861c9a4e 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -185,9 +185,9 @@ task generateModulesList { } task generatePluginsList { - List plugins = project(':plugins').subprojects - .findAll { it.name.contains('example') == false } - .collect { it.name } + Set plugins = new TreeSet<>(project(':plugins').childProjects.keySet()) + plugins.remove('example') + File pluginsFile = new File(buildDir, 'generated-resources/plugins.txt') processResources.from(pluginsFile) inputs.property('plugins', plugins) diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index 503301d78c208..162e7206aaf6d 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -190,6 +190,8 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_6_5_0 = new Version(V_6_5_0_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); public static final int V_6_5_1_ID = 6050199; public static final Version V_6_5_1 = new Version(V_6_5_1_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); + public static final int V_6_5_2_ID = 6050299; + public static final Version V_6_5_2 = new Version(V_6_5_2_ID, org.apache.lucene.util.Version.LUCENE_7_5_0); public static final int V_6_6_0_ID = 6060099; public static final Version V_6_6_0 = new Version(V_6_6_0_ID, org.apache.lucene.util.Version.LUCENE_7_6_0); @@ -208,6 +210,8 @@ public static Version fromId(int id) { switch (id) { case V_6_6_0_ID: return V_6_6_0; + case V_6_5_2_ID: + return V_6_5_2; case V_6_5_1_ID: return V_6_5_1; case V_6_5_0_ID: 
diff --git a/server/src/main/java/org/elasticsearch/action/Action.java b/server/src/main/java/org/elasticsearch/action/Action.java index 51e3f5440ea4f..a0a99630c96c1 100644 --- a/server/src/main/java/org/elasticsearch/action/Action.java +++ b/server/src/main/java/org/elasticsearch/action/Action.java @@ -24,7 +24,8 @@ /** * Base action. Supports building the Request through a RequestBuilder. */ -public abstract class Action> +public abstract class Action> extends GenericAction { protected Action(String name) { diff --git a/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java b/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java index 09db7089ff629..f98a6883b46d4 100644 --- a/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/DocWriteRequest.java @@ -35,12 +35,25 @@ */ public interface DocWriteRequest extends IndicesRequest { + /** + * Set the index for this request + * @return the Request + */ + T index(String index); + /** * Get the index that this request operates on * @return the index */ String index(); + + /** + * Set the type for this request + * @return the Request + */ + T type(String type); + /** * Get the type that this request operates on * @return the type diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java index 8327af3581b3b..c8a2866da81d0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -69,7 +69,8 @@ protected String executor() { @Override protected ClusterBlockException checkBlock(ClusterHealthRequest request, ClusterState state) { - return null; // we want users to be able to call this even when there are global blocks, just to check the health (are there blocks?) + // we want users to be able to call this even when there are global blocks, just to check the health (are there blocks?) 
+ return null; } @Override @@ -78,17 +79,20 @@ protected ClusterHealthResponse newResponse() { } @Override - protected final void masterOperation(ClusterHealthRequest request, ClusterState state, ActionListener listener) throws Exception { + protected final void masterOperation(ClusterHealthRequest request, ClusterState state, + ActionListener listener) throws Exception { logger.warn("attempt to execute a cluster health operation without a task"); throw new UnsupportedOperationException("task parameter is required for this operation"); } @Override - protected void masterOperation(Task task, final ClusterHealthRequest request, final ClusterState unusedState, final ActionListener listener) { + protected void masterOperation(Task task, final ClusterHealthRequest request, final ClusterState unusedState, + final ActionListener listener) { if (request.waitForEvents() != null) { final long endTimeMS = TimeValue.nsecToMSec(System.nanoTime()) + request.timeout().millis(); if (request.local()) { - clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", new LocalClusterUpdateTask(request.waitForEvents()) { + clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", + new LocalClusterUpdateTask(request.waitForEvents()) { @Override public ClusterTasksResult execute(ClusterState currentState) { return unchanged(); @@ -109,7 +113,8 @@ public void onFailure(String source, Exception e) { } }); } else { - clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", new ClusterStateUpdateTask(request.waitForEvents()) { + clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", + new ClusterStateUpdateTask(request.waitForEvents()) { @Override public ClusterState execute(ClusterState currentState) { return currentState; @@ -125,7 +130,8 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS @Override public void onNoLongerMaster(String source) { - logger.trace("stopped being master while waiting for events with priority [{}]. retrying.", request.waitForEvents()); + logger.trace("stopped being master while waiting for events with priority [{}]. retrying.", + request.waitForEvents()); // TransportMasterNodeAction implements the retry logic, which is triggered by passing a NotMasterException listener.onFailure(new NotMasterException("no longer master. 
source: [" + source + "]")); } @@ -204,7 +210,8 @@ private boolean validateRequest(final ClusterHealthRequest request, ClusterState return readyCounter == waitFor; } - private ClusterHealthResponse getResponse(final ClusterHealthRequest request, ClusterState clusterState, final int waitFor, boolean timedOut) { + private ClusterHealthResponse getResponse(final ClusterHealthRequest request, ClusterState clusterState, + final int waitFor, boolean timedOut) { ClusterHealthResponse response = clusterHealth(request, clusterState, clusterService.getMasterService().numberOfPendingTasks(), gatewayAllocator.getNumberOfInFlightFetch(), clusterService.getMasterService().getMaxTaskWaitTime()); int readyCounter = prepareResponse(request, response, clusterState, indexNameExpressionResolver); @@ -306,8 +313,8 @@ static int prepareResponse(final ClusterHealthRequest request, final ClusterHeal } - private ClusterHealthResponse clusterHealth(ClusterHealthRequest request, ClusterState clusterState, int numberOfPendingTasks, int numberOfInFlightFetch, - TimeValue pendingTaskTimeInQueue) { + private ClusterHealthResponse clusterHealth(ClusterHealthRequest request, ClusterState clusterState, int numberOfPendingTasks, + int numberOfInFlightFetch, TimeValue pendingTaskTimeInQueue) { if (logger.isTraceEnabled()) { logger.trace("Calculating health based on state version [{}]", clusterState.version()); } @@ -317,9 +324,9 @@ private ClusterHealthResponse clusterHealth(ClusterHealthRequest request, Cluste concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request); } catch (IndexNotFoundException e) { // one of the specified indices is not there - treat it as RED. - ClusterHealthResponse response = new ClusterHealthResponse(clusterState.getClusterName().value(), Strings.EMPTY_ARRAY, clusterState, - numberOfPendingTasks, numberOfInFlightFetch, UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), - pendingTaskTimeInQueue); + ClusterHealthResponse response = new ClusterHealthResponse(clusterState.getClusterName().value(), Strings.EMPTY_ARRAY, + clusterState, numberOfPendingTasks, numberOfInFlightFetch, UnassignedInfo.getNumberOfDelayedUnassigned(clusterState), + pendingTaskTimeInQueue); response.setStatus(ClusterHealthStatus.RED); return response; } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsAction.java index bb0c0b08a925c..5a9b67f6e3e7d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsAction.java @@ -22,7 +22,8 @@ import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -public class ClusterUpdateSettingsAction extends Action { +public class ClusterUpdateSettingsAction + extends Action { public static final ClusterUpdateSettingsAction INSTANCE = new ClusterUpdateSettingsAction(); public static final String NAME = "cluster:admin/settings/update"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsAction.java index 9aa5586d02bc8..b3bf62d7ef83c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsAction.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsAction.java @@ -23,7 +23,8 @@ import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.io.stream.Writeable; -public class ClusterSearchShardsAction extends Action { +public class ClusterSearchShardsAction + extends Action { public static final ClusterSearchShardsAction INSTANCE = new ClusterSearchShardsAction(); public static final String NAME = "indices:admin/shards/search_shards"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java index 6347a027f4f89..cdef2a03b534c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java @@ -41,7 +41,8 @@ public class ClusterStatsNodeResponse extends BaseNodeResponse { ClusterStatsNodeResponse() { } - public ClusterStatsNodeResponse(DiscoveryNode node, @Nullable ClusterHealthStatus clusterStatus, NodeInfo nodeInfo, NodeStats nodeStats, ShardStats[] shardsStats) { + public ClusterStatsNodeResponse(DiscoveryNode node, @Nullable ClusterHealthStatus clusterStatus, + NodeInfo nodeInfo, NodeStats nodeStats, ShardStats[] shardsStats) { super(node); this.nodeInfo = nodeInfo; this.nodeStats = nodeStats; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksAction.java index 0b420a4e505c2..a9d0fc6738d4c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksAction.java @@ -22,7 +22,8 @@ import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -public class PendingClusterTasksAction extends Action { +public class PendingClusterTasksAction + extends Action { public static final PendingClusterTasksAction INSTANCE = new PendingClusterTasksAction(); public static final String NAME = "cluster:monitor/task"; diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java index c612beea59520..6c40d45cd7f85 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java @@ -109,7 +109,7 @@ protected void masterOperation(final CloseIndexRequest request, final ClusterSta .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()) .indices(concreteIndices); - indexStateService.closeIndex(updateRequest, new ActionListener() { + indexStateService.closeIndices(updateRequest, new ActionListener() { @Override public void onResponse(ClusterStateUpdateResponse response) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexClusterStateUpdateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexClusterStateUpdateRequest.java index ea3abe5e21a54..4062393167b79 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexClusterStateUpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexClusterStateUpdateRequest.java @@ -28,7 +28,7 @@ public class OpenIndexClusterStateUpdateRequest extends IndicesClusterStateUpdat private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT; - OpenIndexClusterStateUpdateRequest() { + public OpenIndexClusterStateUpdateRequest() { } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponse.java index 97db91f8973f4..6ff03cb5291ea 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/open/OpenIndexResponse.java @@ -40,10 +40,10 @@ public class OpenIndexResponse extends ShardsAcknowledgedResponse { declareAcknowledgedAndShardsAcknowledgedFields(PARSER); } - OpenIndexResponse() { + public OpenIndexResponse() { } - OpenIndexResponse(boolean acknowledged, boolean shardsAcknowledged) { + public OpenIndexResponse(boolean acknowledged, boolean shardsAcknowledged) { super(acknowledged, shardsAcknowledged); } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java index 34c18029ead5d..904003623b7e8 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java @@ -40,6 +40,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.function.BiConsumer; +import java.util.function.Supplier; /** * A bulk processor is a thread safe bulk processing class, allowing to easily set when to "flush" a new bulk request @@ -88,6 +89,10 @@ public static class Builder { private ByteSizeValue bulkSize = new ByteSizeValue(5, ByteSizeUnit.MB); private TimeValue flushInterval = null; private BackoffPolicy backoffPolicy = BackoffPolicy.exponentialBackoff(); + private String globalIndex; + private String globalType; + private String globalRouting; + private String globalPipeline; private Builder(BiConsumer> consumer, Listener listener, Scheduler scheduler, Runnable onClose) { @@ -136,6 +141,26 @@ public Builder setFlushInterval(TimeValue flushInterval) { return this; } + public Builder setGlobalIndex(String globalIndex) { + this.globalIndex = globalIndex; + return this; + } + + public Builder setGlobalType(String globalType) { + this.globalType = globalType; + return this; + } + + public Builder setGlobalRouting(String globalRouting) { + this.globalRouting = globalRouting; + return this; + } + + public Builder setGlobalPipeline(String globalPipeline) { + this.globalPipeline = globalPipeline; + return this; + } + /** * Sets a custom backoff policy. The backoff policy defines how the bulk processor should handle retries of bulk requests internally * in case they have failed due to resource constraints (i.e. a thread pool was full). @@ -156,8 +181,14 @@ public Builder setBackoffPolicy(BackoffPolicy backoffPolicy) { * Builds a new bulk processor. 
*/ public BulkProcessor build() { - return new BulkProcessor(consumer, backoffPolicy, listener, concurrentRequests, bulkActions, bulkSize, flushInterval, - scheduler, onClose); + return new BulkProcessor(consumer, backoffPolicy, listener, concurrentRequests, bulkActions, + bulkSize, flushInterval, scheduler, onClose, createBulkRequestWithGlobalDefaults()); + } + + private Supplier createBulkRequestWithGlobalDefaults() { + return () -> new BulkRequest(globalIndex, globalType) + .pipeline(globalPipeline) + .routing(globalRouting); } } @@ -184,6 +215,7 @@ public static Builder builder(BiConsumer bulkRequestSupplier; private final BulkRequestHandler bulkRequestHandler; private final Scheduler scheduler; private final Runnable onClose; @@ -192,11 +224,12 @@ public static Builder builder(BiConsumer> consumer, BackoffPolicy backoffPolicy, Listener listener, int concurrentRequests, int bulkActions, ByteSizeValue bulkSize, @Nullable TimeValue flushInterval, - Scheduler scheduler, Runnable onClose) { + Scheduler scheduler, Runnable onClose, Supplier bulkRequestSupplier) { this.bulkActions = bulkActions; this.bulkSize = bulkSize.getBytes(); - this.bulkRequest = new BulkRequest(); this.scheduler = scheduler; + this.bulkRequest = bulkRequestSupplier.get(); + this.bulkRequestSupplier = bulkRequestSupplier; this.bulkRequestHandler = new BulkRequestHandler(consumer, backoffPolicy, listener, scheduler, concurrentRequests); // Start period flushing task after everything is setup this.cancellableFlushTask = startFlushTask(flushInterval, scheduler); @@ -337,7 +370,7 @@ private void execute() { final BulkRequest bulkRequest = this.bulkRequest; final long executionId = executionIdGen.incrementAndGet(); - this.bulkRequest = new BulkRequest(); + this.bulkRequest = bulkRequestSupplier.get(); this.bulkRequestHandler.execute(bulkRequest, executionId); } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index 62b60a418a818..86ca1532881b8 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -96,12 +96,21 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques protected TimeValue timeout = BulkShardRequest.DEFAULT_TIMEOUT; private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT; private RefreshPolicy refreshPolicy = RefreshPolicy.NONE; + private String globalPipeline; + private String globalRouting; + private String globalIndex; + private String globalType; private long sizeInBytes = 0; public BulkRequest() { } + public BulkRequest(@Nullable String globalIndex, @Nullable String globalType) { + this.globalIndex = globalIndex; + this.globalType = globalType; + } + /** * Adds a list of requests to be executed. Either index or delete requests. 
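
Taken together, the new Builder setters and the BulkRequest supplier above let a BulkProcessor stamp bulk-wide defaults onto every request it batches. A minimal usage sketch, assuming an existing Client; the index, type and pipeline names are placeholders, not taken from the change:

import org.elasticsearch.action.bulk.BulkProcessor;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.xcontent.XContentType;

class GlobalBulkDefaultsSketch {
    static BulkProcessor build(Client client) {
        BulkProcessor.Listener listener = new BulkProcessor.Listener() {
            @Override public void beforeBulk(long executionId, BulkRequest request) {}
            @Override public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {}
            @Override public void afterBulk(long executionId, BulkRequest request, Throwable failure) {}
        };
        // Every BulkRequest the processor creates starts from these defaults, because
        // build() now hands a pre-configured BulkRequest supplier to the constructor.
        return BulkProcessor.builder(client::bulk, listener)
            .setGlobalIndex("logs")          // placeholder index name
            .setGlobalType("_doc")           // placeholder type name
            .setGlobalPipeline("ingest-1")   // placeholder ingest pipeline id
            .build();
    }

    static void use(BulkProcessor processor) {
        // Index and type are filled in from the global defaults because the request omits them.
        processor.add(new IndexRequest().source(XContentType.JSON, "field", "value"));
    }
}
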
*/ @@ -160,6 +169,8 @@ public BulkRequest add(IndexRequest request, @Nullable Object payload) { BulkRequest internalAdd(IndexRequest request, @Nullable Object payload) { Objects.requireNonNull(request, "'request' must not be null"); + applyGlobalMandatoryParameters(request); + requests.add(request); addPayload(payload); // lack of source is validated in validate() method @@ -181,6 +192,8 @@ public BulkRequest add(UpdateRequest request, @Nullable Object payload) { BulkRequest internalAdd(UpdateRequest request, @Nullable Object payload) { Objects.requireNonNull(request, "'request' must not be null"); + applyGlobalMandatoryParameters(request); + requests.add(request); addPayload(payload); if (request.doc() != null) { @@ -205,6 +218,8 @@ public BulkRequest add(DeleteRequest request) { public BulkRequest add(DeleteRequest request, @Nullable Object payload) { Objects.requireNonNull(request, "'request' must not be null"); + applyGlobalMandatoryParameters(request); + requests.add(request); addPayload(payload); sizeInBytes += REQUEST_OVERHEAD; @@ -332,15 +347,15 @@ public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Null String index = defaultIndex; String type = defaultType; String id = null; - String routing = defaultRouting; String parent = null; + String routing = valueOrDefault(defaultRouting, globalRouting); FetchSourceContext fetchSourceContext = defaultFetchSourceContext; String[] fields = defaultFields; String opType = null; long version = Versions.MATCH_ANY; VersionType versionType = VersionType.INTERNAL; int retryOnConflict = 0; - String pipeline = defaultPipeline; + String pipeline = valueOrDefault(defaultPipeline, globalPipeline); // at this stage, next token can either be END_OBJECT (and use default index and type, with auto generated id) // or START_OBJECT which will have another set of parameters @@ -527,6 +542,15 @@ public final BulkRequest timeout(TimeValue timeout) { return this; } + public final BulkRequest pipeline(String globalPipeline) { + this.globalPipeline = globalPipeline; + return this; + } + + public final BulkRequest routing(String globalRouting){ + this.globalRouting = globalRouting; + return this; + } /** * A timeout to wait if the index operation can't be performed immediately. Defaults to {@code 1m}. 
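
The applyGlobalMandatoryParameters/valueOrDefault plumbing above gives request-level values precedence: a global only fills in what an individual action leaves blank. A small illustrative sketch; the index, type, pipeline and routing values are placeholders:

import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.xcontent.XContentType;

class GlobalDefaultsPrecedenceSketch {
    static BulkRequest demo() {
        BulkRequest bulk = new BulkRequest("logs", "_doc") // bulk-wide default index and type
            .pipeline("ingest-1")                          // default pipeline for actions parsed from bulk source
            .routing("shard-a");                           // default routing for actions parsed from bulk source
        // No index or type on this request, so add() fills in "logs" and "_doc".
        bulk.add(new IndexRequest().source(XContentType.JSON, "f", 1));
        // Explicit values win; the global index is ignored for this request.
        bulk.add(new IndexRequest("metrics", "_doc").source(XContentType.JSON, "f", 2));
        return bulk;
    }
}
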
*/ @@ -538,6 +562,14 @@ public TimeValue timeout() { return timeout; } + public String pipeline() { + return globalPipeline; + } + + public String routing() { + return globalRouting; + } + private int findNextMarker(byte marker, int from, BytesReference data, int length) { for (int i = from; i < length; i++) { if (data.get(i) == marker) { @@ -603,4 +635,15 @@ public String getDescription() { return "requests[" + requests.size() + "], indices[" + Strings.collectionToDelimitedString(indices, ", ") + "]"; } + private void applyGlobalMandatoryParameters(DocWriteRequest request) { + request.index(valueOrDefault(request.index(), globalIndex)); + request.type(valueOrDefault(request.type(), globalType)); + } + + private static String valueOrDefault(String value, String globalDefault) { + if (Strings.isNullOrEmpty(value) && !Strings.isNullOrEmpty(globalDefault)) { + return globalDefault; + } + return value; + } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java index ca4a5ef2cbb91..621163d51bbfc 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequestBuilder.java @@ -41,6 +41,10 @@ public class BulkRequestBuilder extends ActionRequestBuilder implements WriteRequestBuilder { + public BulkRequestBuilder(ElasticsearchClient client, BulkAction action, @Nullable String globalIndex, @Nullable String globalType) { + super(client, action, new BulkRequest(globalIndex, globalType)); + } + public BulkRequestBuilder(ElasticsearchClient client, BulkAction action) { super(client, action, new BulkRequest()); } @@ -153,4 +157,14 @@ public final BulkRequestBuilder setTimeout(String timeout) { public int numberOfActions() { return request.numberOfActions(); } + + public BulkRequestBuilder pipeline(String globalPipeline) { + request.pipeline(globalPipeline); + return this; + } + + public BulkRequestBuilder routing(String globalRouting) { + request.routing(globalRouting); + return this; + } } diff --git a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java index ef4adaae4b0dd..c67ae9bf3df9c 100644 --- a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java @@ -112,6 +112,7 @@ public String type() { /** * Sets the type of the document to delete. */ + @Override public DeleteRequest type(String type) { this.type = type; return this; diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index 57bc50ac36211..edfe7b6a7f1c7 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -216,6 +216,7 @@ public String type() { /** * Sets the type of the indexed document. 
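
The new BulkRequestBuilder constructor and the pipeline/routing pass-throughs expose the same defaults at the builder level. A sketch of how a caller might use them, assuming an available ElasticsearchClient; all names are placeholders:

import org.elasticsearch.action.bulk.BulkAction;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.xcontent.XContentType;

class BulkBuilderDefaultsSketch {
    static BulkRequestBuilder prepare(ElasticsearchClient client) {
        return new BulkRequestBuilder(client, BulkAction.INSTANCE, "logs", "_doc") // global index and type
            .pipeline("ingest-1") // forwarded to BulkRequest#pipeline
            .routing("shard-a")   // forwarded to BulkRequest#routing
            .add(new IndexRequest().source(XContentType.JSON, "f", 1))
            .add(new DeleteRequest().id("stale-doc-id")); // index and type come from the globals
    }
}
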
*/ + @Override public IndexRequest type(String type) { this.type = type; return this; diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index fb74b211c9011..6c0c58975af17 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -305,7 +305,8 @@ public IntArrayList[] fillDocIdsToLoad(int numShards, ScoreDoc[] shardDocs) { * completion suggestion ordered by suggestion name */ public InternalSearchResponse merge(boolean ignoreFrom, ReducedQueryPhase reducedQueryPhase, - Collection fetchResults, IntFunction resultsLookup) { + Collection fetchResults, + IntFunction resultsLookup) { if (reducedQueryPhase.isEmptyResult) { return InternalSearchResponse.empty(); } @@ -411,7 +412,8 @@ public ReducedQueryPhase reducedQueryPhase(Collection queryResults, boolean isScrollRequest, boolean trackTotalHits) { + public ReducedQueryPhase reducedQueryPhase(Collection queryResults, + boolean isScrollRequest, boolean trackTotalHits) { return reducedQueryPhase(queryResults, null, new ArrayList<>(), new TopDocsStats(trackTotalHits), 0, isScrollRequest); } @@ -666,8 +668,8 @@ private synchronized void consumeInternal(QuerySearchResult querySearchResult) { } if (hasTopDocs) { TopDocs reducedTopDocs = controller.mergeTopDocs(Arrays.asList(topDocsBuffer), - querySearchResult.from() + querySearchResult.size() // we have to merge here in the same way we collect on a shard - , 0); + // we have to merge here in the same way we collect on a shard + querySearchResult.from() + querySearchResult.size(), 0); Arrays.fill(topDocsBuffer, null); topDocsBuffer[0] = reducedTopDocs; } diff --git a/server/src/main/java/org/elasticsearch/action/support/ActiveShardsObserver.java b/server/src/main/java/org/elasticsearch/action/support/ActiveShardsObserver.java index b87ff9f7ec3bd..5a77db8be17ba 100644 --- a/server/src/main/java/org/elasticsearch/action/support/ActiveShardsObserver.java +++ b/server/src/main/java/org/elasticsearch/action/support/ActiveShardsObserver.java @@ -19,11 +19,12 @@ package org.elasticsearch.action.support; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.threadpool.ThreadPool; @@ -36,7 +37,9 @@ * This class provides primitives for waiting for a configured number of shards * to become active before sending a response on an {@link ActionListener}. 
*/ -public class ActiveShardsObserver extends AbstractComponent { +public class ActiveShardsObserver { + + private static final Logger logger = LogManager.getLogger(ActiveShardsObserver.class); private final ClusterService clusterService; private final ThreadPool threadPool; diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java index fda393e375c9e..938489d6cbedf 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java @@ -38,7 +38,8 @@ import java.util.List; import java.util.Set; -public class MultiTermVectorsRequest extends ActionRequest implements Iterable, CompositeIndicesRequest, RealtimeRequest { +public class MultiTermVectorsRequest extends ActionRequest + implements Iterable, CompositeIndicesRequest, RealtimeRequest { String preference; List requests = new ArrayList<>(); diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequestBuilder.java index 2e68cd25b04d3..f08ba5d63d5d1 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequestBuilder.java @@ -23,7 +23,8 @@ import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.Nullable; -public class MultiTermVectorsRequestBuilder extends ActionRequestBuilder { +public class MultiTermVectorsRequestBuilder + extends ActionRequestBuilder { public MultiTermVectorsRequestBuilder(ElasticsearchClient client, MultiTermVectorsAction action) { super(client, action, new MultiTermVectorsRequest()); diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java index e026ef6ff0877..e4fae4d87deec 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java @@ -635,18 +635,21 @@ public static void parseRequest(TermVectorsRequest termVectorsRequest, XContentP termVectorsRequest.perFieldAnalyzer(readPerFieldAnalyzer(parser.map())); } else if (FILTER.match(currentFieldName, parser.getDeprecationHandler())) { termVectorsRequest.filterSettings(readFilterSettings(parser)); - } else if (INDEX.match(currentFieldName, parser.getDeprecationHandler())) { // the following is important for multi request parsing. + } else if (INDEX.match(currentFieldName, parser.getDeprecationHandler())) { + // the following is important for multi request parsing. termVectorsRequest.index = parser.text(); } else if (TYPE.match(currentFieldName, parser.getDeprecationHandler())) { termVectorsRequest.type = parser.text(); } else if (ID.match(currentFieldName, parser.getDeprecationHandler())) { if (termVectorsRequest.doc != null) { - throw new ElasticsearchParseException("failed to parse term vectors request. either [id] or [doc] can be specified, but not both!"); + throw new ElasticsearchParseException("failed to parse term vectors request. 
" + + "either [id] or [doc] can be specified, but not both!"); } termVectorsRequest.id = parser.text(); } else if (DOC.match(currentFieldName, parser.getDeprecationHandler())) { if (termVectorsRequest.id != null) { - throw new ElasticsearchParseException("failed to parse term vectors request. either [id] or [doc] can be specified, but not both!"); + throw new ElasticsearchParseException("failed to parse term vectors request. " + + "either [id] or [doc] can be specified, but not both!"); } termVectorsRequest.doc(jsonBuilder().copyCurrentStructure(parser)); } else if (ROUTING.match(currentFieldName, parser.getDeprecationHandler())) { @@ -674,7 +677,8 @@ public static Map readPerFieldAnalyzer(Map map) if (e.getValue() instanceof String) { mapStrStr.put(e.getKey(), (String) e.getValue()); } else { - throw new ElasticsearchParseException("expecting the analyzer at [{}] to be a String, but found [{}] instead", e.getKey(), e.getValue().getClass()); + throw new ElasticsearchParseException("expecting the analyzer at [{}] to be a String, " + + "but found [{}] instead", e.getKey(), e.getValue().getClass()); } } return mapStrStr; @@ -703,7 +707,8 @@ private static FilterSettings readFilterSettings(XContentParser parser) throws I } else if (currentFieldName.equals("max_word_length")) { settings.maxWordLength = parser.intValue(); } else { - throw new ElasticsearchParseException("failed to parse term vectors request. the field [{}] is not valid for filter parameter for term vector request", currentFieldName); + throw new ElasticsearchParseException("failed to parse term vectors request. " + + "the field [{}] is not valid for filter parameter for term vector request", currentFieldName); } } } diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java index 01a9812516bf7..a1bc92c65eddb 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java @@ -197,7 +197,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - private void buildField(XContentBuilder builder, final CharsRefBuilder spare, Fields theFields, Iterator fieldIter) throws IOException { + private void buildField(XContentBuilder builder, final CharsRefBuilder spare, Fields theFields, + Iterator fieldIter) throws IOException { String fieldName = fieldIter.next(); builder.startObject(fieldName); Terms curTerms = theFields.terms(fieldName); @@ -213,7 +214,8 @@ private void buildField(XContentBuilder builder, final CharsRefBuilder spare, Fi builder.endObject(); } - private void buildTerm(XContentBuilder builder, final CharsRefBuilder spare, Terms curTerms, TermsEnum termIter, BoostAttribute boostAtt) throws IOException { + private void buildTerm(XContentBuilder builder, final CharsRefBuilder spare, Terms curTerms, + TermsEnum termIter, BoostAttribute boostAtt) throws IOException { // start term, optimized writing BytesRef term = termIter.next(); spare.copyUTF8Bytes(term); @@ -235,7 +237,8 @@ private void buildTermStatistics(XContentBuilder builder, TermsEnum termIter) th // boolean that says if these values actually were requested. // However, we can assume that they were not if the statistic values are // <= 0. 
- assert (((termIter.docFreq() > 0) && (termIter.totalTermFreq() > 0)) || ((termIter.docFreq() == -1) && (termIter.totalTermFreq() == -1))); + assert (((termIter.docFreq() > 0) && (termIter.totalTermFreq() > 0)) || + ((termIter.docFreq() == -1) && (termIter.totalTermFreq() == -1))); int docFreq = termIter.docFreq(); if (docFreq > 0) { builder.field(FieldStrings.DOC_FREQ, docFreq); @@ -349,12 +352,13 @@ public void setExists(boolean exists) { this.exists = exists; } - public void setFields(Fields termVectorsByField, Set selectedFields, EnumSet flags, Fields topLevelFields) throws IOException { + public void setFields(Fields termVectorsByField, Set selectedFields, + EnumSet flags, Fields topLevelFields) throws IOException { setFields(termVectorsByField, selectedFields, flags, topLevelFields, null, null); } - public void setFields(Fields termVectorsByField, Set selectedFields, EnumSet flags, Fields topLevelFields, @Nullable AggregatedDfs dfs, - TermVectorsFilter termVectorsFilter) throws IOException { + public void setFields(Fields termVectorsByField, Set selectedFields, EnumSet flags, Fields topLevelFields, + @Nullable AggregatedDfs dfs, TermVectorsFilter termVectorsFilter) throws IOException { TermVectorsWriter tvw = new TermVectorsWriter(this); if (termVectorsByField != null) { diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsWriter.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsWriter.java index 8a54406c1f9cb..1d76cd55d4826 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsWriter.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsWriter.java @@ -137,10 +137,12 @@ void setFields(Fields termVectorsByField, Set selectedFields, EnumSet shardRequests = new HashMap<>(); for (int i = 0; i < request.requests.size(); i++) { TermVectorsRequest termVectorsRequest = request.requests.get(i); - termVectorsRequest.routing(clusterState.metaData().resolveIndexRouting(termVectorsRequest.parent(), termVectorsRequest.routing(), termVectorsRequest.index())); + termVectorsRequest.routing(clusterState.metaData() + .resolveIndexRouting(termVectorsRequest.parent(), termVectorsRequest.routing(), termVectorsRequest.index())); if (!clusterState.metaData().hasConcreteIndex(termVectorsRequest.index())) { responses.set(i, new MultiTermVectorsItemResponse(null, new MultiTermVectorsResponse.Failure(termVectorsRequest.index(), termVectorsRequest.type(), termVectorsRequest.id(), new IndexNotFoundException(termVectorsRequest.index())))); continue; } String concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, termVectorsRequest).getName(); - if (termVectorsRequest.routing() == null && clusterState.getMetaData().routingRequired(concreteSingleIndex, termVectorsRequest.type())) { - responses.set(i, new MultiTermVectorsItemResponse(null, new MultiTermVectorsResponse.Failure(concreteSingleIndex, termVectorsRequest.type(), termVectorsRequest.id(), - new IllegalArgumentException("routing is required for [" + concreteSingleIndex + "]/[" + termVectorsRequest.type() + "]/[" + termVectorsRequest.id() + "]")))); + if (termVectorsRequest.routing() == null && clusterState.getMetaData() + .routingRequired(concreteSingleIndex, termVectorsRequest.type())) { + responses.set(i, new MultiTermVectorsItemResponse(null, + new MultiTermVectorsResponse.Failure(concreteSingleIndex, termVectorsRequest.type(), termVectorsRequest.id(), + new IllegalArgumentException("routing is required for [" + 
concreteSingleIndex + "]/[" + + termVectorsRequest.type() + "]/[" + termVectorsRequest.id() + "]")))); continue; } ShardId shardId = clusterService.operationRouting().shardId(clusterState, concreteSingleIndex, diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java b/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java index 10fd954354ba8..ceee5ad27c32f 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TransportShardMultiTermsVectorAction.java @@ -37,7 +37,8 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -public class TransportShardMultiTermsVectorAction extends TransportSingleShardAction { +public class TransportShardMultiTermsVectorAction + extends TransportSingleShardAction { private final IndicesService indicesService; @@ -87,7 +88,8 @@ protected MultiTermVectorsShardResponse shardOperation(MultiTermVectorsShardRequ if (TransportActions.isShardNotAvailableException(e)) { throw e; } else { - logger.debug(() -> new ParameterizedMessage("{} failed to execute multi term vectors for [{}]/[{}]", shardId, termVectorsRequest.type(), termVectorsRequest.id()), e); + logger.debug(() -> new ParameterizedMessage("{} failed to execute multi term vectors for [{}]/[{}]", + shardId, termVectorsRequest.type(), termVectorsRequest.id()), e); response.add(request.locations.get(i), new MultiTermVectorsResponse.Failure(request.index(), termVectorsRequest.type(), termVectorsRequest.id(), e)); } diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java b/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java index 3ca9d771f9078..1a221d4f5e8fe 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/TransportTermVectorsAction.java @@ -75,7 +75,8 @@ protected boolean resolveIndex(TermVectorsRequest request) { @Override protected void resolveRequest(ClusterState state, InternalRequest request) { // update the routing (request#index here is possibly an alias or a parent) - request.request().routing(state.metaData().resolveIndexRouting(request.request().parent(), request.request().routing(), request.request().index())); + request.request().routing(state.metaData() + .resolveIndexRouting(request.request().parent(), request.request().routing(), request.request().index())); // Fail fast on the node that received the request. 
if (request.request().routing() == null && state.getMetaData().routingRequired(request.concreteIndex(), request.request().type())) { throw new RoutingMissingException(request.concreteIndex(), request.request().type(), request.request().id()); diff --git a/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java b/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java index e6f8f34c40534..0a1df34076ca0 100644 --- a/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java +++ b/server/src/main/java/org/elasticsearch/action/update/TransportUpdateAction.java @@ -74,9 +74,11 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio @Inject public TransportUpdateAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, - TransportBulkAction bulkAction, TransportCreateIndexAction createIndexAction, UpdateHelper updateHelper, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, IndicesService indicesService, AutoCreateIndex autoCreateIndex) { - super(settings, UpdateAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver, UpdateRequest::new); + TransportBulkAction bulkAction, TransportCreateIndexAction createIndexAction, UpdateHelper updateHelper, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, + IndicesService indicesService, AutoCreateIndex autoCreateIndex) { + super(settings, UpdateAction.NAME, threadPool, clusterService, transportService, + actionFilters, indexNameExpressionResolver, UpdateRequest::new); this.bulkAction = bulkAction; this.createIndexAction = createIndexAction; this.updateHelper = updateHelper; @@ -116,7 +118,8 @@ public static void resolveAndValidateRouting(MetaData metaData, String concreteI protected void doExecute(final UpdateRequest request, final ActionListener listener) { // if we don't have a master, we don't have metadata, that's fine, let it find a master using create index API if (autoCreateIndex.shouldAutoCreate(request.index(), clusterService.state())) { - createIndexAction.execute(new CreateIndexRequest().index(request.index()).cause("auto(update api)").masterNodeTimeout(request.timeout()), new ActionListener() { + createIndexAction.execute(new CreateIndexRequest().index(request.index()).cause("auto(update api)") + .masterNodeTimeout(request.timeout()), new ActionListener() { @Override public void onResponse(CreateIndexResponse result) { innerExecute(request, listener); @@ -179,12 +182,15 @@ protected void shardOperation(final UpdateRequest request, final ActionListener< final BytesReference upsertSourceBytes = upsertRequest.source(); bulkAction.execute(toSingleItemBulkRequest(upsertRequest), wrapBulkResponse( ActionListener.wrap(response -> { - UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(), response.getVersion(), response.getResult()); + UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), + response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(), + response.getVersion(), response.getResult()); if ((request.fetchSource() != null && request.fetchSource().fetchSource()) || (request.fields() != null && request.fields().length > 0)) { Tuple> sourceAndContent = 
XContentHelper.convertToMap(upsertSourceBytes, true, upsertRequest.getContentType()); - update.setGetResult(UpdateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), upsertSourceBytes)); + update.setGetResult(UpdateHelper.extractGetResult(request, request.concreteIndex(), + response.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), upsertSourceBytes)); } else { update.setGetResult(null); } @@ -200,8 +206,11 @@ protected void shardOperation(final UpdateRequest request, final ActionListener< final BytesReference indexSourceBytes = indexRequest.source(); bulkAction.execute(toSingleItemBulkRequest(indexRequest), wrapBulkResponse( ActionListener.wrap(response -> { - UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(), response.getVersion(), response.getResult()); - update.setGetResult(UpdateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), indexSourceBytes)); + UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), + response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(), + response.getVersion(), response.getResult()); + update.setGetResult(UpdateHelper.extractGetResult(request, request.concreteIndex(), + response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), indexSourceBytes)); update.setForcedRefresh(response.forcedRefresh()); listener.onResponse(update); }, exception -> handleUpdateFailureWithRetry(listener, request, exception, retryCount))) @@ -211,8 +220,11 @@ protected void shardOperation(final UpdateRequest request, final ActionListener< DeleteRequest deleteRequest = result.action(); bulkAction.execute(toSingleItemBulkRequest(deleteRequest), wrapBulkResponse( ActionListener.wrap(response -> { - UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(), response.getVersion(), response.getResult()); - update.setGetResult(UpdateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), null)); + UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), + response.getType(), response.getId(), response.getSeqNo(), response.getPrimaryTerm(), + response.getVersion(), response.getResult()); + update.setGetResult(UpdateHelper.extractGetResult(request, request.concreteIndex(), + response.getVersion(), result.updatedSourceAsMap(), result.updateSourceContentType(), null)); update.setForcedRefresh(response.forcedRefresh()); listener.onResponse(update); }, exception -> handleUpdateFailureWithRetry(listener, request, exception, retryCount))) diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java index c3eb9f7fbda22..70983be4eeefd 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateHelper.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.update; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; 
import org.elasticsearch.action.DocWriteResponse; @@ -28,7 +29,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.Streamable; @@ -57,7 +57,9 @@ /** * Helper for translating an update request to an index, delete request or update response. */ -public class UpdateHelper extends AbstractComponent { +public class UpdateHelper { + + private static final Logger logger = LogManager.getLogger(UpdateHelper.class); private final ScriptService scriptService; diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java index f7353b7c3cae7..7b16e096cb504 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java @@ -125,11 +125,13 @@ public ActionRequestValidationException validate() { } else { if (version != Versions.MATCH_ANY && retryOnConflict > 0) { - validationException = addValidationError("can't provide both retry_on_conflict and a specific version", validationException); + validationException = addValidationError("can't provide both retry_on_conflict and a specific version", + validationException); } if (!versionType.validateVersionForWrites(version)) { - validationException = addValidationError("illegal version value [" + version + "] for version type [" + versionType.name() + "]", validationException); + validationException = addValidationError("illegal version value [" + version + + "] for version type [" + versionType.name() + "]", validationException); } } @@ -624,8 +626,8 @@ private IndexRequest safeDoc() { } /** - * Sets the index request to be used if the document does not exists. Otherwise, a {@link org.elasticsearch.index.engine.DocumentMissingException} - * is thrown. + * Sets the index request to be used if the document does not exist. Otherwise, a + * {@link org.elasticsearch.index.engine.DocumentMissingException} is thrown. */ public UpdateRequest upsert(IndexRequest upsertRequest) { this.upsertRequest = upsertRequest; diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java index 1ad5d714b855d..88d86f643472f 100644 --- a/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/update/UpdateRequestBuilder.java @@ -263,8 +263,8 @@ public UpdateRequestBuilder setDoc(XContentType xContentType, Object... source) } /** - * Sets the index request to be used if the document does not exists. Otherwise, a {@link org.elasticsearch.index.engine.DocumentMissingException} - * is thrown. + * Sets the index request to be used if the document does not exist. Otherwise, a + * {@link org.elasticsearch.index.engine.DocumentMissingException} is thrown.
*/ public UpdateRequestBuilder setUpsert(IndexRequest indexRequest) { request.upsert(indexRequest); diff --git a/server/src/main/java/org/elasticsearch/client/Client.java b/server/src/main/java/org/elasticsearch/client/Client.java index f97f618347af5..d2be1fba086df 100644 --- a/server/src/main/java/org/elasticsearch/client/Client.java +++ b/server/src/main/java/org/elasticsearch/client/Client.java @@ -232,6 +232,11 @@ public interface Client extends ElasticsearchClient, Releasable { */ BulkRequestBuilder prepareBulk(); + /** + * Executes a bulk of index / delete operations with default index and/or type + */ + BulkRequestBuilder prepareBulk(@Nullable String globalIndex, @Nullable String globalType); + /** * Gets the document that was indexed from an index with a type and id. * diff --git a/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java index 111c1fd96d90e..49c957d88fd82 100644 --- a/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -19,6 +19,8 @@ package org.elasticsearch.client.support; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; @@ -334,7 +336,6 @@ import org.elasticsearch.client.IndicesAdminClient; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentType; @@ -343,7 +344,9 @@ import java.util.Map; -public abstract class AbstractClient extends AbstractComponent implements Client { +public abstract class AbstractClient implements Client { + + protected final Logger logger; protected final Settings settings; private final ThreadPool threadPool; @@ -354,6 +357,7 @@ public AbstractClient(Settings settings, ThreadPool threadPool) { this.settings = settings; this.threadPool = threadPool; this.admin = new Admin(this); + this.logger = LogManager.getLogger(this.getClass()); this.threadedWrapper = new ThreadedActionListener.Wrapper(logger, settings, threadPool); } @@ -483,6 +487,11 @@ public BulkRequestBuilder prepareBulk() { return new BulkRequestBuilder(this, BulkAction.INSTANCE); } + @Override + public BulkRequestBuilder prepareBulk(@Nullable String globalIndex, @Nullable String globalType) { + return new BulkRequestBuilder(this, BulkAction.INSTANCE, globalIndex, globalType); + } + @Override public ActionFuture get(final GetRequest request) { return execute(GetAction.INSTANCE, request); diff --git a/server/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java b/server/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java index b36fe45599590..7387b03ee822d 100644 --- a/server/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java +++ b/server/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java @@ -20,6 +20,8 @@ package org.elasticsearch.client.transport; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import
org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.core.internal.io.IOUtils; @@ -35,7 +37,6 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Randomness; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; @@ -70,7 +71,9 @@ import java.util.concurrent.ScheduledFuture; import java.util.concurrent.atomic.AtomicInteger; -final class TransportClientNodesService extends AbstractComponent implements Closeable { +final class TransportClientNodesService implements Closeable { + + private static final Logger logger = LogManager.getLogger(TransportClientNodesService.class); private final TimeValue nodesSamplerInterval; diff --git a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java index 184cbcdf859d5..559b554b007d1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java +++ b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; @@ -35,7 +36,6 @@ import org.elasticsearch.cluster.routing.allocation.DiskThresholdSettings; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -62,8 +62,9 @@ * Every time the timer runs, gathers information about the disk usage and * shard sizes across the cluster. */ -public class InternalClusterInfoService extends AbstractComponent - implements ClusterInfoService, LocalNodeMasterListener, ClusterStateListener { +public class InternalClusterInfoService implements ClusterInfoService, LocalNodeMasterListener, ClusterStateListener { + + private static final Logger logger = LogManager.getLogger(InternalClusterInfoService.class); public static final Setting INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING = Setting.timeSetting("cluster.info.update.interval", TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(10), diff --git a/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java b/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java index 2a9d960f8ccea..8010765854016 100644 --- a/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.cluster; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -54,6 +56,7 @@ * is done by {@link MasterFaultDetection}. 
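
Most of the remaining hunks in this part of the change follow one mechanical pattern: drop the AbstractComponent base class and give the class its own static Logger. A schematic sketch of the pattern; the class name here is made up and not one of the files above:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

// Previously: public class SomeClusterComponent extends AbstractComponent { ... }
// Now the logger that AbstractComponent used to provide is declared explicitly.
public class SomeClusterComponent {

    private static final Logger logger = LogManager.getLogger(SomeClusterComponent.class);

    void doWork() {
        logger.debug("logging behaves exactly as before");
    }
}
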
*/ public class NodeConnectionsService extends AbstractLifecycleComponent { + private static final Logger logger = LogManager.getLogger(NodeConnectionsService.class); public static final Setting CLUSTER_NODE_RECONNECT_INTERVAL_SETTING = positiveTimeSetting("cluster.nodes.reconnect_interval", TimeValue.timeValueSeconds(10), Property.NodeScope); diff --git a/server/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java b/server/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java index 7b79823380c2e..810eb497c7537 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java @@ -19,12 +19,13 @@ package org.elasticsearch.cluster.action.index; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaDataMappingService; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -38,7 +39,9 @@ import java.io.IOException; -public class NodeMappingRefreshAction extends AbstractComponent { +public class NodeMappingRefreshAction { + + private static final Logger logger = LogManager.getLogger(NodeMappingRefreshAction.class); public static final String ACTION_NAME = "internal:cluster/node/mapping/refresh"; diff --git a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index 2b481015ca0ee..a8d5203c4fb0a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/server/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.action.shard; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; @@ -42,7 +43,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -73,7 +73,9 @@ import java.util.concurrent.ConcurrentMap; import java.util.function.Predicate; -public class ShardStateAction extends AbstractComponent { +public class ShardStateAction { + + private static final Logger logger = LogManager.getLogger(ShardStateAction.class); public static final String SHARD_STARTED_ACTION_NAME = "internal:cluster/shard/started"; public static final String SHARD_FAILED_ACTION_NAME = "internal:cluster/shard/failure"; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 892ce3abbf311..6ba0c9e5da12a 100644 --- 
a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -51,7 +51,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.ValidationException; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.logging.DeprecationLogger; @@ -99,7 +98,7 @@ /** * Service responsible for submitting create index requests */ -public class MetaDataCreateIndexService extends AbstractComponent { +public class MetaDataCreateIndexService { private static final Logger logger = LogManager.getLogger(MetaDataCreateIndexService.class); private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java index 39563ca7037d9..2b5bb0c94b41d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataDeleteIndexService.java @@ -19,6 +19,8 @@ package org.elasticsearch.cluster.metadata; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.delete.DeleteIndexClusterStateUpdateRequest; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; @@ -31,7 +33,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; @@ -47,7 +48,10 @@ /** * Deletes indices. 
*/ -public class MetaDataDeleteIndexService extends AbstractComponent { +public class MetaDataDeleteIndexService { + + private static final Logger logger = LogManager.getLogger(MetaDataDeleteIndexService.class); + private final Settings settings; private final ClusterService clusterService; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java index 38d83b398856e..3d3258c5ae301 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java @@ -39,7 +39,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.ValidationException; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.index.Index; @@ -59,7 +58,7 @@ /** * Service responsible for submitting open/close index requests */ -public class MetaDataIndexStateService extends AbstractComponent { +public class MetaDataIndexStateService { private static final Logger logger = LogManager.getLogger(MetaDataIndexStateService.class); private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); @@ -85,7 +84,7 @@ public MetaDataIndexStateService(ClusterService clusterService, AllocationServic this.activeShardsObserver = new ActiveShardsObserver(clusterService, threadPool); } - public void closeIndex(final CloseIndexClusterStateUpdateRequest request, final ActionListener listener) { + public void closeIndices(final CloseIndexClusterStateUpdateRequest request, final ActionListener listener) { if (request.indices() == null || request.indices().length == 0) { throw new IllegalArgumentException("Index name is required"); } @@ -100,46 +99,50 @@ protected ClusterStateUpdateResponse newResponse(boolean acknowledged) { @Override public ClusterState execute(ClusterState currentState) { - Set indicesToClose = new HashSet<>(); - for (Index index : request.indices()) { - final IndexMetaData indexMetaData = currentState.metaData().getIndexSafe(index); - if (indexMetaData.getState() != IndexMetaData.State.CLOSE) { - indicesToClose.add(indexMetaData); - } - } + return closeIndices(currentState, request.indices(), indicesAsString); + } + }); + } - if (indicesToClose.isEmpty()) { - return currentState; - } + public ClusterState closeIndices(ClusterState currentState, final Index[] indices, String indicesAsString) { + Set indicesToClose = new HashSet<>(); + for (Index index : indices) { + final IndexMetaData indexMetaData = currentState.metaData().getIndexSafe(index); + if (indexMetaData.getState() != IndexMetaData.State.CLOSE) { + indicesToClose.add(indexMetaData); + } + } - // Check if index closing conflicts with any running restores - RestoreService.checkIndexClosing(currentState, indicesToClose); - // Check if index closing conflicts with any running snapshots - SnapshotsService.checkIndexClosing(currentState, indicesToClose); - logger.info("closing indices [{}]", indicesAsString); + if (indicesToClose.isEmpty()) { + return currentState; + } - MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); - ClusterBlocks.Builder blocksBuilder = ClusterBlocks.builder() - .blocks(currentState.blocks()); - for (IndexMetaData openIndexMetadata : 
indicesToClose) { - final String indexName = openIndexMetadata.getIndex().getName(); - mdBuilder.put(IndexMetaData.builder(openIndexMetadata).state(IndexMetaData.State.CLOSE)); - blocksBuilder.addIndexBlock(indexName, INDEX_CLOSED_BLOCK); - } + // Check if index closing conflicts with any running restores + RestoreService.checkIndexClosing(currentState, indicesToClose); + // Check if index closing conflicts with any running snapshots + SnapshotsService.checkIndexClosing(currentState, indicesToClose); + logger.info("closing indices [{}]", indicesAsString); + + MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); + ClusterBlocks.Builder blocksBuilder = ClusterBlocks.builder() + .blocks(currentState.blocks()); + for (IndexMetaData openIndexMetadata : indicesToClose) { + final String indexName = openIndexMetadata.getIndex().getName(); + mdBuilder.put(IndexMetaData.builder(openIndexMetadata).state(IndexMetaData.State.CLOSE)); + blocksBuilder.addIndexBlock(indexName, INDEX_CLOSED_BLOCK); + } - ClusterState updatedState = ClusterState.builder(currentState).metaData(mdBuilder).blocks(blocksBuilder).build(); + ClusterState updatedState = ClusterState.builder(currentState).metaData(mdBuilder).blocks(blocksBuilder).build(); - RoutingTable.Builder rtBuilder = RoutingTable.builder(currentState.routingTable()); - for (IndexMetaData index : indicesToClose) { - rtBuilder.remove(index.getIndex().getName()); - } + RoutingTable.Builder rtBuilder = RoutingTable.builder(currentState.routingTable()); + for (IndexMetaData index : indicesToClose) { + rtBuilder.remove(index.getIndex().getName()); + } - //no explicit wait for other nodes needed as we use AckedClusterStateUpdateTask - return allocationService.reroute( - ClusterState.builder(updatedState).routingTable(rtBuilder.build()).build(), - "indices closed [" + indicesAsString + "]"); - } - }); + //no explicit wait for other nodes needed as we use AckedClusterStateUpdateTask + return allocationService.reroute( + ClusterState.builder(updatedState).routingTable(rtBuilder.build()).build(), + "indices closed [" + indicesAsString + "]"); } public void openIndex(final OpenIndexClusterStateUpdateRequest request, diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java index 63798c67e8165..54a5908f2a223 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java @@ -19,6 +19,8 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.alias.Alias; @@ -30,7 +32,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.ValidationException; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.IndexScopedSettings; @@ -61,7 +62,9 @@ /** * Service responsible for submitting index templates updates */ -public class MetaDataIndexTemplateService extends AbstractComponent { +public class MetaDataIndexTemplateService { + + private static final 
Logger logger = LogManager.getLogger(MetaDataIndexTemplateService.class); private final ClusterService clusterService; private final AliasValidator aliasValidator; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java index 29f2c1c911888..7da37c4415e62 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexUpgradeService.java @@ -18,12 +18,13 @@ */ package org.elasticsearch.cluster.metadata; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.search.similarities.Similarity; import org.elasticsearch.Version; import org.elasticsearch.common.TriFunction; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -51,7 +52,9 @@ * occurs during cluster upgrade, when dangling indices are imported into the cluster or indices * are restored from a repository. */ -public class MetaDataIndexUpgradeService extends AbstractComponent { +public class MetaDataIndexUpgradeService { + + private static final Logger logger = LogManager.getLogger(MetaDataIndexUpgradeService.class); private final Settings settings; private final NamedXContentRegistry xContentRegistry; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index 4f740e1028392..53d6457a217ef 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -20,6 +20,8 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingClusterStateUpdateRequest; @@ -32,7 +34,6 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.unit.TimeValue; @@ -57,7 +58,9 @@ /** * Service responsible for submitting mapping changes */ -public class MetaDataMappingService extends AbstractComponent { +public class MetaDataMappingService { + + private static final Logger logger = LogManager.getLogger(MetaDataMappingService.class); private final ClusterService clusterService; private final IndicesService indicesService; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java index 2284d507afa2c..011d1113455c8 100644 --- 
a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java @@ -37,7 +37,6 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.regex.Regex; @@ -62,7 +61,7 @@ /** * Service responsible for submitting update index settings requests */ -public class MetaDataUpdateSettingsService extends AbstractComponent { +public class MetaDataUpdateSettingsService { private static final Logger logger = LogManager.getLogger(MetaDataUpdateSettingsService.class); private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/TemplateUpgradeService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/TemplateUpgradeService.java index 9026d26a11fd5..227dca6b739bc 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/TemplateUpgradeService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/TemplateUpgradeService.java @@ -20,6 +20,8 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -34,7 +36,6 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.ToXContent; @@ -61,7 +62,10 @@ /** * Upgrades Templates on behalf of installed {@link Plugin}s when a node joins the cluster */ -public class TemplateUpgradeService extends AbstractComponent implements ClusterStateListener { +public class TemplateUpgradeService implements ClusterStateListener { + + private static final Logger logger = LogManager.getLogger(TemplateUpgradeService.class); + private final UnaryOperator> indexTemplateMetaDataUpgraders; public final ClusterService clusterService; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/DelayedAllocationService.java b/server/src/main/java/org/elasticsearch/cluster/routing/DelayedAllocationService.java index fd7f8f6811fdf..2f593082fa9e3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/DelayedAllocationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/DelayedAllocationService.java @@ -19,6 +19,8 @@ package org.elasticsearch.cluster.routing; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; @@ -52,6 +54,7 @@ * another cluster change event. 
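The hunks above all make the same change: the class stops extending AbstractComponent and instead declares its own static Log4j logger. A minimal sketch of that pattern, using a hypothetical FooService class (the name is illustrative only, not a class touched by this change):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    // Before this change: `public class FooService extends AbstractComponent { ... }`
    // and the logger was inherited from the base class.
    public class FooService {

        // After: the logger is declared directly on the class, bound explicitly to it,
        // so the class no longer needs the AbstractComponent inheritance.
        private static final Logger logger = LogManager.getLogger(FooService.class);

        public void doWork() {
            logger.info("doing work");
        }
    }
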
*/ public class DelayedAllocationService extends AbstractLifecycleComponent implements ClusterStateListener { + private static final Logger logger = LogManager.getLogger(DelayedAllocationService.class); static final String CLUSTER_UPDATE_TASK_SOURCE = "delayed_allocation_reroute"; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java index 215b247bd97ea..dbf2dd7906b36 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java @@ -28,7 +28,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.AwarenessAllocationDecider; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; @@ -47,7 +46,7 @@ import java.util.Set; import java.util.stream.Collectors; -public class OperationRouting extends AbstractComponent { +public class OperationRouting { private static final Logger logger = LogManager.getLogger(OperationRouting.class); private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java index 770e5b2717023..5d711cabce763 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java @@ -19,6 +19,8 @@ package org.elasticsearch.cluster.routing; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; @@ -45,6 +47,7 @@ *

*/ public class RoutingService extends AbstractLifecycleComponent { + private static final Logger logger = LogManager.getLogger(RoutingService.class); private static final String CLUSTER_UPDATE_TASK_SOURCE = "cluster_reroute"; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index 0bc94a93cc59a..59f43a193ddc8 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -19,6 +19,8 @@ package org.elasticsearch.cluster.routing.allocation; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.cluster.ClusterInfoService; import org.elasticsearch.cluster.ClusterState; @@ -38,7 +40,6 @@ import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.gateway.GatewayAllocator; import java.util.ArrayList; @@ -61,7 +62,9 @@ * for shard allocation. This class also manages new nodes joining the cluster * and rerouting of shards. */ -public class AllocationService extends AbstractComponent { +public class AllocationService { + + private static final Logger logger = LogManager.getLogger(AllocationService.class); private final AllocationDeciders allocationDeciders; private GatewayAllocator gatewayAllocator; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java index f2447a9c4e51b..4badab5a0cafa 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitor.java @@ -25,6 +25,8 @@ import com.carrotsearch.hppc.ObjectLookupContainer; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterState; @@ -35,7 +37,6 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; @@ -45,7 +46,10 @@ * reroute if it does. 
Also responsible for logging about nodes that have * passed the disk watermarks */ -public class DiskThresholdMonitor extends AbstractComponent { +public class DiskThresholdMonitor { + + private static final Logger logger = LogManager.getLogger(DiskThresholdMonitor.class); + private final DiskThresholdSettings diskThresholdSettings; private final Client client; private final Set nodeHasPassedWatermark = Sets.newConcurrentHashSet(); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 597e904e24ec6..2e6d3b5c68a0d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.routing.allocation.allocator; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.IntroSorter; @@ -41,7 +42,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type; import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; @@ -78,7 +78,9 @@ * These parameters are combined in a {@link WeightFunction} that allows calculation of node weights which * are used to re-balance shards based on global as well as per-index factors. */ -public class BalancedShardsAllocator extends AbstractComponent implements ShardsAllocator { +public class BalancedShardsAllocator implements ShardsAllocator { + + private static final Logger logger = LogManager.getLogger(BalancedShardsAllocator.class); public static final Setting INDEX_BALANCE_FACTOR_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.index", 0.55f, 0.0f, Property.Dynamic, Property.NodeScope); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecider.java index 3ae86d60bd98c..bd51b7d47b335 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDecider.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.routing.allocation.decider; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; @@ -31,7 +30,7 @@ * dynamic cluster- or index-wide shard allocation decisions on a per-node * basis. */ -public abstract class AllocationDecider extends AbstractComponent { +public abstract class AllocationDecider { /** * Returns a {@link Decision} whether the given shard routing can be * re-balanced to the given allocation. 
The default is diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java index 7f91be340fd79..5ab234c7e892d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AllocationDeciders.java @@ -19,6 +19,8 @@ package org.elasticsearch.cluster.routing.allocation.decider; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; @@ -35,6 +37,8 @@ */ public class AllocationDeciders extends AllocationDecider { + private static final Logger logger = LogManager.getLogger(AllocationDeciders.class); + private final Collection allocations; public AllocationDeciders(Collection allocations) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java index 1ea369c75d9bc..4d309dd972818 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java @@ -21,6 +21,8 @@ import java.util.Locale; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.settings.ClusterSettings; @@ -47,6 +49,8 @@ */ public class ClusterRebalanceAllocationDecider extends AllocationDecider { + private static final Logger logger = LogManager.getLogger(ClusterRebalanceAllocationDecider.class); + public static final String NAME = "cluster_rebalance"; private static final String CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE = "cluster.routing.allocation.allow_rebalance"; public static final Setting CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING = diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java index a11b3dcf102f8..a27f46b5512f1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ConcurrentRebalanceAllocationDecider.java @@ -19,6 +19,8 @@ package org.elasticsearch.cluster.routing.allocation.decider; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.settings.ClusterSettings; @@ -40,6 +42,8 @@ */ public class ConcurrentRebalanceAllocationDecider extends AllocationDecider { + private static final Logger logger = LogManager.getLogger(ConcurrentRebalanceAllocationDecider.class); + public static final String NAME = "concurrent_rebalance"; public static final Setting 
CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING = diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index 9676eaf4df1c3..1f048fca76c09 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -22,6 +22,8 @@ import java.util.Set; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.DiskUsage; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -68,6 +70,8 @@ */ public class DiskThresholdDecider extends AllocationDecider { + private static final Logger logger = LogManager.getLogger(DiskThresholdDecider.class); + public static final String NAME = "disk_threshold"; private final DiskThresholdSettings diskThresholdSettings; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java index 7eb1b882d1ffe..1dbaa01be0936 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDecider.java @@ -19,6 +19,8 @@ package org.elasticsearch.cluster.routing.allocation.decider; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; @@ -30,6 +32,8 @@ */ public class SnapshotInProgressAllocationDecider extends AllocationDecider { + private static final Logger logger = LogManager.getLogger(SnapshotInProgressAllocationDecider.class); + public static final String NAME = "snapshot_in_progress"; /** diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java index 0d67cd6071f08..596d3af261f17 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ThrottlingAllocationDecider.java @@ -19,6 +19,8 @@ package org.elasticsearch.cluster.routing.allocation.decider; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.routing.RecoverySource; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; @@ -52,6 +54,8 @@ */ public class ThrottlingAllocationDecider extends AllocationDecider { + private static final Logger logger = LogManager.getLogger(ThrottlingAllocationDecider.class); + public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES = 2; public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES = 4; public static final String NAME = "throttling"; diff --git 
a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java index 439a2c534a38f..125d381652b62 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.service; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -66,6 +67,7 @@ import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; public class ClusterApplierService extends AbstractLifecycleComponent implements ClusterApplier { + private static final Logger logger = LogManager.getLogger(ClusterApplierService.class); public static final String CLUSTER_UPDATE_THREAD_NAME = "clusterApplierService#updateTask"; diff --git a/server/src/main/java/org/elasticsearch/cluster/service/ClusterService.java b/server/src/main/java/org/elasticsearch/cluster/service/ClusterService.java index f4c91b2781c71..abaf57d2eba32 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/ClusterService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/ClusterService.java @@ -19,6 +19,8 @@ package org.elasticsearch.cluster.service; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateApplier; @@ -44,6 +46,7 @@ import java.util.function.Supplier; public class ClusterService extends AbstractLifecycleComponent { + private static final Logger logger = LogManager.getLogger(ClusterService.class); private final MasterService masterService; diff --git a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java index cbef687fc410a..d86cf7f7683e3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java @@ -67,6 +67,7 @@ import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; public class MasterService extends AbstractLifecycleComponent { + private static final Logger logger = LogManager.getLogger(MasterService.class); public static final String MASTER_UPDATE_THREAD_NAME = "masterService#updateTask"; diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java index cffdf0f4507c0..eea30dd4e530f 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java @@ -24,7 +24,6 @@ import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -33,7 +32,7 @@ import java.nio.file.Files; import java.nio.file.Path; -public class FsBlobStore extends AbstractComponent implements 
BlobStore { +public class FsBlobStore implements BlobStore { private final Path path; diff --git a/server/src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java b/server/src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java index cee7f3a5f42cf..8a3f8d0cff6f7 100644 --- a/server/src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java +++ b/server/src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java @@ -19,13 +19,16 @@ package org.elasticsearch.common.component; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.settings.Settings; import java.io.IOException; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; -public abstract class AbstractLifecycleComponent extends AbstractComponent implements LifecycleComponent { +public abstract class AbstractLifecycleComponent implements LifecycleComponent { + private static final Logger logger = LogManager.getLogger(AbstractLifecycleComponent.class); protected final Lifecycle lifecycle = new Lifecycle(); diff --git a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java index d80d90cd5fb49..7bb9ec3b8612d 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/AbstractScopedSettings.java @@ -19,12 +19,13 @@ package org.elasticsearch.common.settings; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.search.spell.LevensteinDistance; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.regex.Regex; import java.util.ArrayList; @@ -47,12 +48,15 @@ * A basic setting service that can be used for per-index and per-cluster settings. * This service offers transactional application of updates settings. 
*/ -public abstract class AbstractScopedSettings extends AbstractComponent { +public abstract class AbstractScopedSettings { + public static final String ARCHIVED_SETTINGS_PREFIX = "archived."; private static final Pattern KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])*[-\\w]+$"); private static final Pattern GROUP_KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])+$"); private static final Pattern AFFIX_KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])+[*](?:[.][-\\w]+)+$"); + protected final Logger logger = LogManager.getLogger(this.getClass()); + private final Settings settings; private final List> settingUpdaters = new CopyOnWriteArrayList<>(); private final Map> complexMatchers; diff --git a/server/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java b/server/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java index 6f2e28e97cbae..66bfdbdc194f5 100644 --- a/server/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java +++ b/server/src/main/java/org/elasticsearch/common/util/PageCacheRecycler.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.util; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.recycler.AbstractRecyclerC; @@ -39,7 +38,7 @@ import static org.elasticsearch.common.recycler.Recyclers.none; /** A recycler of fixed-size pages. */ -public class PageCacheRecycler extends AbstractComponent implements Releasable { +public class PageCacheRecycler implements Releasable { public static final Setting TYPE_SETTING = new Setting<>("cache.recycler.page.type", Type.CONCURRENT.name(), Type::parse, Property.NodeScope); diff --git a/server/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java b/server/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java index ebc64fa3af1d1..8cb2f6cf672f4 100644 --- a/server/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java +++ b/server/src/main/java/org/elasticsearch/discovery/DiscoverySettings.java @@ -21,7 +21,6 @@ import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -34,7 +33,7 @@ /** * Exposes common discovery settings that may be supported by all the different discovery implementations */ -public class DiscoverySettings extends AbstractComponent { +public class DiscoverySettings { public static final int NO_MASTER_BLOCK_ID = 2; public static final ClusterBlock NO_MASTER_BLOCK_ALL = new ClusterBlock(NO_MASTER_BLOCK_ID, "no master", true, true, false, diff --git a/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java b/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java index 75c69419723c3..189ce336c9598 100644 --- a/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java +++ b/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java @@ -19,6 +19,8 @@ package org.elasticsearch.discovery.single; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; @@ 
-46,6 +48,7 @@ * A discovery implementation where the only member of the cluster is the local node. */ public class SingleNodeDiscovery extends AbstractLifecycleComponent implements Discovery { + private static final Logger logger = LogManager.getLogger(SingleNodeDiscovery.class); private final ClusterName clusterName; protected final TransportService transportService; diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java b/server/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java index 8f2853904fa0f..e9eab8fa2550d 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/ElectMasterService.java @@ -20,10 +20,11 @@ package org.elasticsearch.discovery.zen; import com.carrotsearch.hppc.ObjectContainer; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -37,7 +38,9 @@ import java.util.Objects; import java.util.stream.Collectors; -public class ElectMasterService extends AbstractComponent { +public class ElectMasterService { + + private static final Logger logger = LogManager.getLogger(ElectMasterService.class); public static final Setting DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING = Setting.intSetting("discovery.zen.minimum_master_nodes", -1, Property.Dynamic, Property.NodeScope); diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/FaultDetection.java b/server/src/main/java/org/elasticsearch/discovery/zen/FaultDetection.java index 3d389fc814188..5c731621179ac 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/FaultDetection.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/FaultDetection.java @@ -21,9 +21,10 @@ import java.io.Closeable; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -39,7 +40,9 @@ * A base class for {@link MasterFaultDetection} & {@link NodesFaultDetection}, * making sure both use the same setting. 
*/ -public abstract class FaultDetection extends AbstractComponent implements Closeable { +public abstract class FaultDetection implements Closeable { + + private static final Logger logger = LogManager.getLogger(FaultDetection.class); public static final Setting CONNECT_ON_NETWORK_DISCONNECT_SETTING = Setting.boolSetting("discovery.zen.fd.connect_on_network_disconnect", false, Property.NodeScope); diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/MasterFaultDetection.java b/server/src/main/java/org/elasticsearch/discovery/zen/MasterFaultDetection.java index df5910878de0d..0dd0f10069bc4 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/MasterFaultDetection.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/MasterFaultDetection.java @@ -19,6 +19,8 @@ package org.elasticsearch.discovery.zen; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.ClusterName; @@ -54,6 +56,8 @@ */ public class MasterFaultDetection extends FaultDetection { + private static final Logger logger = LogManager.getLogger(MasterFaultDetection.class); + public static final String MASTER_PING_ACTION_NAME = "internal:discovery/zen/fd/master_ping"; public interface Listener { diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java b/server/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java index 78247903359ad..29b5473fc3a76 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/MembershipAction.java @@ -19,13 +19,14 @@ package org.elasticsearch.discovery.zen; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; @@ -43,7 +44,9 @@ import java.util.function.BiConsumer; import java.util.function.Supplier; -public class MembershipAction extends AbstractComponent { +public class MembershipAction { + + private static final Logger logger = LogManager.getLogger(MembershipAction.class); public static final String DISCOVERY_JOIN_ACTION_NAME = "internal:discovery/zen/join"; public static final String DISCOVERY_JOIN_VALIDATE_ACTION_NAME = "internal:discovery/zen/join/validate"; diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java b/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java index ecf52a6975369..2e2f72ca0fd94 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.discovery.zen; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; @@ -34,7 +35,6 @@ import 
org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.common.Priority; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.DiscoverySettings; @@ -54,7 +54,9 @@ * This class processes incoming join requests (passed via {@link ZenDiscovery}). Incoming nodes * are directly added to the cluster state or are accumulated during master election. */ -public class NodeJoinController extends AbstractComponent { +public class NodeJoinController { + + private static final Logger logger = LogManager.getLogger(NodeJoinController.class); private final MasterService masterService; private final JoinTaskExecutor joinTaskExecutor; diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java b/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java index 33f30c1103e47..9dc9cc78179d3 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/NodesFaultDetection.java @@ -19,6 +19,8 @@ package org.elasticsearch.discovery.zen; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -53,6 +55,8 @@ */ public class NodesFaultDetection extends FaultDetection { + private static final Logger logger = LogManager.getLogger(NodesFaultDetection.class); + public static final String PING_ACTION_NAME = "internal:discovery/zen/fd/ping"; public abstract static class Listener { diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java b/server/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java index 580af49c0e007..029218534ab95 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/PublishClusterStateAction.java @@ -19,6 +19,8 @@ package org.elasticsearch.discovery.zen; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; @@ -30,7 +32,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.compress.Compressor; import org.elasticsearch.common.compress.CompressorFactory; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -66,7 +67,9 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; -public class PublishClusterStateAction extends AbstractComponent { +public class PublishClusterStateAction { + + private static final Logger logger = LogManager.getLogger(PublishClusterStateAction.class); public static final String SEND_ACTION_NAME = "internal:discovery/zen/publish/send"; public static final String COMMIT_ACTION_NAME = "internal:discovery/zen/publish/commit"; diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/SettingsBasedHostsProvider.java
b/server/src/main/java/org/elasticsearch/discovery/zen/SettingsBasedHostsProvider.java index a11e255f88878..3b16c3734156f 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/SettingsBasedHostsProvider.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/SettingsBasedHostsProvider.java @@ -19,7 +19,8 @@ package org.elasticsearch.discovery.zen; -import org.elasticsearch.common.component.AbstractComponent; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; @@ -38,7 +39,9 @@ * An example unicast hosts setting might look as follows: * [67.81.244.10, 67.81.244.11:9305, 67.81.244.15:9400] */ -public class SettingsBasedHostsProvider extends AbstractComponent implements UnicastHostsProvider { +public class SettingsBasedHostsProvider implements UnicastHostsProvider { + + private static final Logger logger = LogManager.getLogger(SettingsBasedHostsProvider.class); public static final Setting> DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING = Setting.listSetting("discovery.zen.ping.unicast.hosts", emptyList(), Function.identity(), Setting.Property.NodeScope); diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java index acb8ec478409c..459666e0c8443 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/UnicastZenPing.java @@ -20,6 +20,7 @@ package org.elasticsearch.discovery.zen; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; @@ -29,7 +30,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lease.Releasable; @@ -90,7 +90,9 @@ import static java.util.Collections.emptySet; import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; -public class UnicastZenPing extends AbstractComponent implements ZenPing { +public class UnicastZenPing implements ZenPing { + + private static final Logger logger = LogManager.getLogger(UnicastZenPing.class); public static final String ACTION_NAME = "internal:discovery/zen/unicast"; public static final Setting DISCOVERY_ZEN_PING_UNICAST_CONCURRENT_CONNECTS_SETTING = diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index 6f51a6d20bbeb..4802e37449e3b 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -19,6 +19,7 @@ package org.elasticsearch.discovery.zen; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.core.internal.io.IOUtils; @@ -86,6 +87,7 @@ import static 
org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery, PingContextProvider, IncomingClusterStateListener { + private static final Logger logger = LogManager.getLogger(ZenDiscovery.class); public static final Setting PING_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.zen.ping_timeout", timeValueSeconds(3), Property.NodeScope); diff --git a/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java b/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java index ba29a08987d3c..7e4172961ea1e 100644 --- a/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java +++ b/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java @@ -19,6 +19,7 @@ package org.elasticsearch.gateway; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingNodes; @@ -28,7 +29,6 @@ import org.elasticsearch.cluster.routing.allocation.NodeAllocationResult; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.Decision; -import org.elasticsearch.common.component.AbstractComponent; import java.util.ArrayList; import java.util.List; @@ -40,7 +40,10 @@ * Individual implementations of this class are responsible for providing * the logic to determine to which nodes (if any) those shards are allocated. */ -public abstract class BaseGatewayShardAllocator extends AbstractComponent { +public abstract class BaseGatewayShardAllocator { + + protected final Logger logger = LogManager.getLogger(this.getClass()); + /** * Allocate unassigned shards to nodes (if any) where valid copies of the shard already exist. * It is up to the individual implementations of {@link #makeAllocationDecision(ShardRouting, RoutingAllocation, Logger)} diff --git a/server/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java b/server/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java index d9eb5013e9c6c..4d7949cdf4de8 100644 --- a/server/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java +++ b/server/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java @@ -20,13 +20,14 @@ package org.elasticsearch.gateway; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.metadata.IndexGraveyard; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.env.NodeEnvironment; @@ -50,7 +51,9 @@ * their state written on disk, but don't exist in the metadata of the cluster), and importing * them into the cluster.
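Abstract base classes take a slightly different form in this change: BaseGatewayShardAllocator above (and AbstractScopedSettings earlier) keep an instance logger resolved from the runtime class rather than a static one, so each concrete subclass logs under its own name. A small sketch of that variant, with hypothetical class names:

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;

    abstract class BaseAllocator {
        // Resolved against this.getClass(), so the concrete subclass name appears in the log category.
        protected final Logger logger = LogManager.getLogger(this.getClass());
    }

    class ExampleAllocator extends BaseAllocator {
        void allocate() {
            logger.trace("making an allocation decision"); // logged under ExampleAllocator
        }
    }
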
*/ -public class DanglingIndicesState extends AbstractComponent implements ClusterStateListener { +public class DanglingIndicesState implements ClusterStateListener { + + private static final Logger logger = LogManager.getLogger(DanglingIndicesState.class); private final NodeEnvironment nodeEnv; private final MetaStateService metaStateService; diff --git a/server/src/main/java/org/elasticsearch/gateway/Gateway.java b/server/src/main/java/org/elasticsearch/gateway/Gateway.java index 1248d9c888c91..21da38af2a239 100644 --- a/server/src/main/java/org/elasticsearch/gateway/Gateway.java +++ b/server/src/main/java/org/elasticsearch/gateway/Gateway.java @@ -21,13 +21,14 @@ import com.carrotsearch.hppc.ObjectFloatHashMap; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.zen.ElectMasterService; @@ -37,7 +38,9 @@ import java.util.Arrays; import java.util.Map; -public class Gateway extends AbstractComponent { +public class Gateway { + + private static final Logger logger = LogManager.getLogger(Gateway.class); private final ClusterService clusterService; diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java b/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java index dce92b1dd5083..67d9ab9a5bf88 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java @@ -19,6 +19,7 @@ package org.elasticsearch.gateway; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.support.nodes.BaseNodeResponse; import org.elasticsearch.action.support.nodes.BaseNodesResponse; @@ -30,7 +31,6 @@ import org.elasticsearch.cluster.routing.allocation.FailedShard; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; @@ -40,7 +40,9 @@ import java.util.List; import java.util.concurrent.ConcurrentMap; -public class GatewayAllocator extends AbstractComponent { +public class GatewayAllocator { + + private static final Logger logger = LogManager.getLogger(GatewayAllocator.class); private final RoutingService routingService; diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index 9bbb5af5bf028..7220d8f873334 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -20,6 +20,8 @@ package org.elasticsearch.gateway; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.apache.logging.log4j.LogManager; +import 
org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; @@ -32,7 +34,6 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.env.NodeEnvironment; @@ -57,7 +58,9 @@ import static java.util.Collections.emptySet; import static java.util.Collections.unmodifiableSet; -public class GatewayMetaState extends AbstractComponent implements ClusterStateApplier { +public class GatewayMetaState implements ClusterStateApplier { + + private static final Logger logger = LogManager.getLogger(GatewayMetaState.class); private final NodeEnvironment nodeEnv; private final MetaStateService metaStateService; diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java index e19e8367f1627..004b71b9d9703 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java @@ -20,6 +20,8 @@ package org.elasticsearch.gateway; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; @@ -48,6 +50,7 @@ import java.util.concurrent.atomic.AtomicBoolean; public class GatewayService extends AbstractLifecycleComponent implements ClusterStateListener { + private static final Logger logger = LogManager.getLogger(GatewayService.class); public static final Setting EXPECTED_NODES_SETTING = Setting.intSetting("gateway.expected_nodes", -1, -1, Property.NodeScope); diff --git a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java index 11184e7427240..a829262f406fd 100644 --- a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java +++ b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java @@ -19,6 +19,8 @@ package org.elasticsearch.gateway; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; @@ -31,7 +33,6 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -49,7 +50,9 @@ import java.util.Arrays; import java.util.Collection; -public class LocalAllocateDangledIndices extends AbstractComponent { +public class LocalAllocateDangledIndices { + + private static final Logger logger = LogManager.getLogger(LocalAllocateDangledIndices.class); public static final String ACTION_NAME = "internal:gateway/local/allocate_dangled"; diff --git 
a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java index 24f5fd63662d9..66e0c6e2f06ee 100644 --- a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java +++ b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java @@ -19,11 +19,12 @@ package org.elasticsearch.gateway; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; @@ -36,7 +37,9 @@ /** * Handles writing and loading both {@link MetaData} and {@link IndexMetaData} */ -public class MetaStateService extends AbstractComponent { +public class MetaStateService { + + private static final Logger logger = LogManager.getLogger(MetaStateService.class); private final NodeEnvironment nodeEnv; private final NamedXContentRegistry namedXContentRegistry; diff --git a/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java b/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java index 78cd8d462ea91..543ec9be75d17 100644 --- a/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java +++ b/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java @@ -60,7 +60,8 @@ public void shardRoutingChanged(IndexShard indexShard, @Nullable ShardRouting ol try { listener.shardRoutingChanged(indexShard, oldRouting, newRouting); } catch (Exception e) { - logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke shard touring changed callback", indexShard.shardId().getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke shard touring changed callback", + indexShard.shardId().getId()), e); } } } @@ -71,7 +72,8 @@ public void afterIndexShardCreated(IndexShard indexShard) { try { listener.afterIndexShardCreated(indexShard); } catch (Exception e) { - logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke after shard created callback", indexShard.shardId().getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke after shard created callback", + indexShard.shardId().getId()), e); throw e; } } @@ -83,7 +85,8 @@ public void afterIndexShardStarted(IndexShard indexShard) { try { listener.afterIndexShardStarted(indexShard); } catch (Exception e) { - logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke after shard started callback", indexShard.shardId().getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke after shard started callback", + indexShard.shardId().getId()), e); throw e; } } @@ -96,7 +99,8 @@ public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSh try { listener.beforeIndexShardClosed(shardId, indexShard, indexSettings); } catch (Exception e) { - logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke before shard closed callback", shardId.getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke before shard closed callback", + shardId.getId()), e); throw e; } } @@ -109,7 +113,8 @@ public void afterIndexShardClosed(ShardId shardId, @Nullable IndexShard 
indexSha try { listener.afterIndexShardClosed(shardId, indexShard, indexSettings); } catch (Exception e) { - logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke after shard closed callback", shardId.getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke after shard closed callback", + shardId.getId()), e); throw e; } } @@ -121,19 +126,22 @@ public void onShardInactive(IndexShard indexShard) { try { listener.onShardInactive(indexShard); } catch (Exception e) { - logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke on shard inactive callback", indexShard.shardId().getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke on shard inactive callback", + indexShard.shardId().getId()), e); throw e; } } } @Override - public void indexShardStateChanged(IndexShard indexShard, @Nullable IndexShardState previousState, IndexShardState currentState, @Nullable String reason) { + public void indexShardStateChanged(IndexShard indexShard, @Nullable IndexShardState previousState, IndexShardState currentState, + @Nullable String reason) { for (IndexEventListener listener : listeners) { try { listener.indexShardStateChanged(indexShard, previousState, indexShard.state(), reason); } catch (Exception e) { - logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke index shard state changed callback", indexShard.shardId().getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke index shard state changed callback", + indexShard.shardId().getId()), e); throw e; } } @@ -169,7 +177,8 @@ public void beforeIndexShardCreated(ShardId shardId, Settings indexSettings) { try { listener.beforeIndexShardCreated(shardId, indexSettings); } catch (Exception e) { - logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke before shard created callback", shardId), e); + logger.warn(() -> + new ParameterizedMessage("[{}] failed to invoke before shard created callback", shardId), e); throw e; } } @@ -206,7 +215,8 @@ public void beforeIndexShardDeleted(ShardId shardId, try { listener.beforeIndexShardDeleted(shardId, indexSettings); } catch (Exception e) { - logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke before shard deleted callback", shardId.getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke before shard deleted callback", + shardId.getId()), e); throw e; } } @@ -219,7 +229,8 @@ public void afterIndexShardDeleted(ShardId shardId, try { listener.afterIndexShardDeleted(shardId, indexSettings); } catch (Exception e) { - logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke after shard deleted callback", shardId.getId()), e); + logger.warn(() -> new ParameterizedMessage("[{}] failed to invoke after shard deleted callback", + shardId.getId()), e); throw e; } } diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java index 472e4adedb666..2866b7d7ac864 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java +++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java @@ -93,7 +93,8 @@ public final class IndexSettings { case "checksum": return s; default: - throw new IllegalArgumentException("unknown value for [index.shard.check_on_startup] must be one of [true, false, fix, checksum] but was: " + s); + throw new IllegalArgumentException("unknown value for [index.shard.check_on_startup] must be one of " + + "[true, false, fix, checksum] but was: " + 
s); } }, Property.IndexScope); @@ -135,7 +136,6 @@ public final class IndexSettings { public static final Setting MAX_ANALYZED_OFFSET_SETTING = Setting.intSetting("index.highlight.max_analyzed_offset", -1, -1, Property.Dynamic, Property.IndexScope); - /** * Index setting describing the maximum number of terms that can be used in Terms Query. * The default maximum of 65536 terms is defensive, as extra processing and memory is involved @@ -176,7 +176,8 @@ public final class IndexSettings { * because they both do the same thing: control the size of the heap of hits. */ public static final Setting MAX_RESCORE_WINDOW_SETTING = - Setting.intSetting("index.max_rescore_window", MAX_RESULT_WINDOW_SETTING, 1, Property.Dynamic, Property.IndexScope); + Setting.intSetting("index.max_rescore_window", MAX_RESULT_WINDOW_SETTING, 1, + Property.Dynamic, Property.IndexScope); /** * Index setting describing the maximum number of filters clauses that can be used * in an adjacency_matrix aggregation. The max number of buckets produced by @@ -205,8 +206,8 @@ public final class IndexSettings { * the chance of ops based recoveries. **/ public static final Setting INDEX_TRANSLOG_RETENTION_AGE_SETTING = - Setting.timeSetting("index.translog.retention.age", TimeValue.timeValueHours(12), TimeValue.timeValueMillis(-1), Property.Dynamic, - Property.IndexScope); + Setting.timeSetting("index.translog.retention.age", TimeValue.timeValueHours(12), TimeValue.timeValueMillis(-1), + Property.Dynamic, Property.IndexScope); /** * Controls how many translog files that are no longer needed for persistence reasons @@ -242,8 +243,8 @@ public final class IndexSettings { */ public static final TimeValue DEFAULT_GC_DELETES = TimeValue.timeValueSeconds(60); public static final Setting INDEX_GC_DELETES_SETTING = - Setting.timeSetting("index.gc_deletes", DEFAULT_GC_DELETES, new TimeValue(-1, TimeUnit.MILLISECONDS), Property.Dynamic, - Property.IndexScope); + Setting.timeSetting("index.gc_deletes", DEFAULT_GC_DELETES, new TimeValue(-1, TimeUnit.MILLISECONDS), + Property.Dynamic, Property.IndexScope); /** * Specifies if the index should use soft-delete instead of hard-delete for update/delete operations. @@ -257,13 +258,14 @@ public final class IndexSettings { * If soft-deletes is enabled, an engine by default will retain all operations up to the global checkpoint. **/ public static final Setting INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING = - Setting.longSetting("index.soft_deletes.retention.operations", 0, 0, Property.IndexScope, Property.Dynamic); + Setting.longSetting("index.soft_deletes.retention.operations", 0, 0, + Property.IndexScope, Property.Dynamic); /** * The maximum number of refresh listeners allows on this shard. 
*/ - public static final Setting MAX_REFRESH_LISTENERS_PER_SHARD = Setting.intSetting("index.max_refresh_listeners", 1000, 0, - Property.Dynamic, Property.IndexScope); + public static final Setting MAX_REFRESH_LISTENERS_PER_SHARD = Setting.intSetting("index.max_refresh_listeners", + 1000, 0, Property.Dynamic, Property.IndexScope); /** * The maximum number of slices allowed in a scroll request @@ -475,16 +477,23 @@ public IndexSettings(final IndexMetaData indexMetaData, final Settings nodeSetti defaultPipeline = scopedSettings.get(DEFAULT_PIPELINE); scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_COMPOUND_FORMAT_SETTING, mergePolicyConfig::setNoCFSRatio); - scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, mergePolicyConfig::setDeletesPctAllowed); - scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, mergePolicyConfig::setExpungeDeletesAllowed); - scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, mergePolicyConfig::setFloorSegmentSetting); - scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, mergePolicyConfig::setMaxMergesAtOnce); - scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING, mergePolicyConfig::setMaxMergesAtOnceExplicit); - scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, mergePolicyConfig::setMaxMergedSegment); - scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, mergePolicyConfig::setSegmentsPerTier); - - scopedSettings.addSettingsUpdateConsumer(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING, MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING, - mergeSchedulerConfig::setMaxThreadAndMergeCount); + scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING, + mergePolicyConfig::setDeletesPctAllowed); + scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING, + mergePolicyConfig::setExpungeDeletesAllowed); + scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING, + mergePolicyConfig::setFloorSegmentSetting); + scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING, + mergePolicyConfig::setMaxMergesAtOnce); + scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING, + mergePolicyConfig::setMaxMergesAtOnceExplicit); + scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING, + mergePolicyConfig::setMaxMergedSegment); + scopedSettings.addSettingsUpdateConsumer(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING, + mergePolicyConfig::setSegmentsPerTier); + + scopedSettings.addSettingsUpdateConsumer(MergeSchedulerConfig.MAX_THREAD_COUNT_SETTING, + MergeSchedulerConfig.MAX_MERGE_COUNT_SETTING, mergeSchedulerConfig::setMaxThreadAndMergeCount); scopedSettings.addSettingsUpdateConsumer(MergeSchedulerConfig.AUTO_THROTTLE_SETTING, mergeSchedulerConfig::setAutoThrottle); scopedSettings.addSettingsUpdateConsumer(INDEX_TRANSLOG_DURABILITY_SETTING, this::setTranslogDurability); scopedSettings.addSettingsUpdateConsumer(MAX_RESULT_WINDOW_SETTING, this::setMaxResultWindow); @@ -619,14 +628,16 @@ 
public Settings getNodeSettings() { } /** - * Updates the settings and index metadata and notifies all registered settings consumers with the new settings iff at least one setting has changed. + * Updates the settings and index metadata and notifies all registered settings consumers with the new settings iff at least one + * setting has changed. * * @return true iff any setting has been updated otherwise false. */ public synchronized boolean updateIndexMetaData(IndexMetaData indexMetaData) { final Settings newSettings = indexMetaData.getSettings(); if (version.equals(Version.indexCreated(newSettings)) == false) { - throw new IllegalArgumentException("version mismatch on settings update expected: " + version + " but was: " + Version.indexCreated(newSettings)); + throw new IllegalArgumentException("version mismatch on settings update expected: " + version + " but was: " + + Version.indexCreated(newSettings)); } final String newUUID = newSettings.get(IndexMetaData.SETTING_INDEX_UUID, IndexMetaData.INDEX_UUID_NA_VALUE); if (newUUID.equals(getUUID()) == false) { @@ -703,7 +714,8 @@ public TimeValue getRefreshInterval() { public ByteSizeValue getTranslogRetentionSize() { return translogRetentionSize; } /** - * Returns the transaction log retention age which controls the maximum age (time from creation) that translog files will be kept around + * Returns the transaction log retention age which controls the maximum age (time from creation) that translog files will be kept + * around */ public TimeValue getTranslogRetentionAge() { return translogRetentionAge; } diff --git a/server/src/main/java/org/elasticsearch/index/IndexWarmer.java b/server/src/main/java/org/elasticsearch/index/IndexWarmer.java index ce06b68dcff23..b3505e405d818 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexWarmer.java +++ b/server/src/main/java/org/elasticsearch/index/IndexWarmer.java @@ -19,9 +19,10 @@ package org.elasticsearch.index; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.DirectoryReader; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fielddata.IndexFieldData; @@ -41,7 +42,9 @@ import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; -public final class IndexWarmer extends AbstractComponent { +public final class IndexWarmer { + + private static final Logger logger = LogManager.getLogger(IndexWarmer.class); private final List listeners; diff --git a/server/src/main/java/org/elasticsearch/index/MergePolicyConfig.java b/server/src/main/java/org/elasticsearch/index/MergePolicyConfig.java index 321450d9f2038..b753cb80363ff 100644 --- a/server/src/main/java/org/elasticsearch/index/MergePolicyConfig.java +++ b/server/src/main/java/org/elasticsearch/index/MergePolicyConfig.java @@ -130,8 +130,8 @@ public final class MergePolicyConfig { public static final double DEFAULT_RECLAIM_DELETES_WEIGHT = 2.0d; public static final double DEFAULT_DELETES_PCT_ALLOWED = 33.0d; public static final Setting INDEX_COMPOUND_FORMAT_SETTING = - new Setting<>("index.compound_format", Double.toString(TieredMergePolicy.DEFAULT_NO_CFS_RATIO), MergePolicyConfig::parseNoCFSRatio, - Property.Dynamic, Property.IndexScope); + new Setting<>("index.compound_format", Double.toString(TieredMergePolicy.DEFAULT_NO_CFS_RATIO), + 
MergePolicyConfig::parseNoCFSRatio, Property.Dynamic, Property.IndexScope); public static final Setting INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING = Setting.doubleSetting("index.merge.policy.expunge_deletes_allowed", DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d, @@ -157,8 +157,8 @@ public final class MergePolicyConfig { public static final Setting INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING = Setting.doubleSetting("index.merge.policy.deletes_pct_allowed", DEFAULT_DELETES_PCT_ALLOWED, 20.0d, 50.0d, Property.Dynamic, Property.IndexScope); - public static final String INDEX_MERGE_ENABLED = "index.merge.enabled"; // don't convert to Setting<> and register... we only set this in tests and register via a plugin - + // don't convert to Setting<> and register... we only set this in tests and register via a plugin + public static final String INDEX_MERGE_ENABLED = "index.merge.enabled"; MergePolicyConfig(Logger logger, IndexSettings indexSettings) { this.logger = logger; @@ -166,15 +166,17 @@ public final class MergePolicyConfig { ByteSizeValue floorSegment = indexSettings.getValue(INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING); int maxMergeAtOnce = indexSettings.getValue(INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING); int maxMergeAtOnceExplicit = indexSettings.getValue(INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING); - // TODO is this really a good default number for max_merge_segment, what happens for large indices, won't they end up with many segments? + // TODO is this really a good default number for max_merge_segment, what happens for large indices, + // won't they end up with many segments? ByteSizeValue maxMergedSegment = indexSettings.getValue(INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING); double segmentsPerTier = indexSettings.getValue(INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING); double reclaimDeletesWeight = indexSettings.getValue(INDEX_MERGE_POLICY_RECLAIM_DELETES_WEIGHT_SETTING); double deletesPctAllowed = indexSettings.getValue(INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING); - this.mergesEnabled = indexSettings.getSettings() - .getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), INDEX_MERGE_ENABLED, true, new DeprecationLogger(logger)); + this.mergesEnabled = indexSettings.getSettings().getAsBooleanLenientForPreEs6Indices(indexSettings.getIndexVersionCreated(), + INDEX_MERGE_ENABLED, true, new DeprecationLogger(logger)); if (mergesEnabled == false) { - logger.warn("[{}] is set to false, this should only be used in tests and can cause serious problems in production environments", INDEX_MERGE_ENABLED); + logger.warn("[{}] is set to false, this should only be used in tests and can cause serious problems in production" + + " environments", INDEX_MERGE_ENABLED); } maxMergeAtOnce = adjustMaxMergeAtOnceIfNeeded(maxMergeAtOnce, segmentsPerTier); mergePolicy.setNoCFSRatio(indexSettings.getValue(INDEX_COMPOUND_FORMAT_SETTING)); @@ -186,8 +188,11 @@ public final class MergePolicyConfig { mergePolicy.setSegmentsPerTier(segmentsPerTier); mergePolicy.setDeletesPctAllowed(deletesPctAllowed); if (logger.isTraceEnabled()) { - logger.trace("using [tiered] merge mergePolicy with expunge_deletes_allowed[{}], floor_segment[{}], max_merge_at_once[{}], max_merge_at_once_explicit[{}], max_merged_segment[{}], segments_per_tier[{}], deletes_pct_allowed[{}]", - forceMergeDeletesPctAllowed, floorSegment, maxMergeAtOnce, maxMergeAtOnceExplicit, maxMergedSegment, segmentsPerTier, deletesPctAllowed); + logger.trace("using [tiered] merge mergePolicy with expunge_deletes_allowed[{}], 
floor_segment[{}]," + + " max_merge_at_once[{}], max_merge_at_once_explicit[{}], max_merged_segment[{}], segments_per_tier[{}]," + + " deletes_pct_allowed[{}]", + forceMergeDeletesPctAllowed, floorSegment, maxMergeAtOnce, maxMergeAtOnceExplicit, maxMergedSegment, segmentsPerTier, + deletesPctAllowed); } } @@ -231,7 +236,9 @@ private int adjustMaxMergeAtOnceIfNeeded(int maxMergeAtOnce, double segmentsPerT if (newMaxMergeAtOnce <= 1) { newMaxMergeAtOnce = 2; } - logger.debug("changing max_merge_at_once from [{}] to [{}] because segments_per_tier [{}] has to be higher or equal to it", maxMergeAtOnce, newMaxMergeAtOnce, segmentsPerTier); + logger.debug("changing max_merge_at_once from [{}] to [{}] because segments_per_tier [{}] has to be higher or " + + "equal to it", + maxMergeAtOnce, newMaxMergeAtOnce, segmentsPerTier); maxMergeAtOnce = newMaxMergeAtOnce; } return maxMergeAtOnce; @@ -255,7 +262,8 @@ private static double parseNoCFSRatio(String noCFSRatio) { } return value; } catch (NumberFormatException ex) { - throw new IllegalArgumentException("Expected a boolean or a value in the interval [0..1] but was: [" + noCFSRatio + "]", ex); + throw new IllegalArgumentException("Expected a boolean or a value in the interval [0..1] but was: " + + "[" + noCFSRatio + "]", ex); } } } diff --git a/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java b/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java index 200d72f601d11..32c527f06ff3b 100644 --- a/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java +++ b/server/src/main/java/org/elasticsearch/index/SearchSlowLog.java @@ -86,22 +86,30 @@ public SearchSlowLog(IndexSettings indexSettings) { this.queryLogger = LogManager.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".query"); this.fetchLogger = LogManager.getLogger(INDEX_SEARCH_SLOWLOG_PREFIX + ".fetch"); - indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING, this::setQueryWarnThreshold); + indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING, + this::setQueryWarnThreshold); this.queryWarnThreshold = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING).nanos(); - indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING, this::setQueryInfoThreshold); + indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING, + this::setQueryInfoThreshold); this.queryInfoThreshold = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING).nanos(); - indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING, this::setQueryDebugThreshold); + indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING, + this::setQueryDebugThreshold); this.queryDebugThreshold = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING).nanos(); - indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING, this::setQueryTraceThreshold); + indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING, + this::setQueryTraceThreshold); this.queryTraceThreshold = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING).nanos(); - 
indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING, this::setFetchWarnThreshold); + indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING, + this::setFetchWarnThreshold); this.fetchWarnThreshold = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING).nanos(); - indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING, this::setFetchInfoThreshold); + indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING, + this::setFetchInfoThreshold); this.fetchInfoThreshold = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING).nanos(); - indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING, this::setFetchDebugThreshold); + indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING, + this::setFetchDebugThreshold); this.fetchDebugThreshold = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING).nanos(); - indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING, this::setFetchTraceThreshold); + indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING, + this::setFetchTraceThreshold); this.fetchTraceThreshold = indexSettings.getValue(INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING).nanos(); indexSettings.getScopedSettings().addSettingsUpdateConsumer(INDEX_SEARCH_SLOWLOG_LEVEL, this::setLevel); @@ -170,7 +178,8 @@ public String toString() { Strings.collectionToDelimitedString(context.groupStats(), ",", "", "", sb); sb.append("], "); } - sb.append("search_type[").append(context.searchType()).append("], total_shards[").append(context.numberOfShards()).append("], "); + sb.append("search_type[").append(context.searchType()).append("], total_shards[") + .append(context.numberOfShards()).append("], "); if (context.request().source() != null) { sb.append("source[").append(context.request().source().toString(FORMAT_PARAMS)).append("], "); } else { diff --git a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java index ba25377c57941..b3b1d37e99d0f 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java @@ -170,10 +170,13 @@ public Map buildTokenFilterFactories(IndexSettings i * instead of building the infrastructure for plugins we rather make it a real exception to not pollute the general interface and * hide internal data-structures as much as possible. 
*/ - tokenFilters.put("synonym", requiresAnalysisSettings((is, env, name, settings) -> new SynonymTokenFilterFactory(is, env, this, name, settings))); - tokenFilters.put("synonym_graph", requiresAnalysisSettings((is, env, name, settings) -> new SynonymGraphTokenFilterFactory(is, env, this, name, settings))); + tokenFilters.put("synonym", requiresAnalysisSettings((is, env, name, settings) -> + new SynonymTokenFilterFactory(is, env, this, name, settings))); + tokenFilters.put("synonym_graph", requiresAnalysisSettings((is, env, name, settings) -> + new SynonymGraphTokenFilterFactory(is, env, this, name, settings))); - return buildMapping(Component.FILTER, indexSettings, tokenFiltersSettings, Collections.unmodifiableMap(tokenFilters), prebuiltAnalysis.preConfiguredTokenFilters); + return buildMapping(Component.FILTER, indexSettings, tokenFiltersSettings, + Collections.unmodifiableMap(tokenFilters), prebuiltAnalysis.preConfiguredTokenFilters); } public Map buildTokenizerFactories(IndexSettings indexSettings) throws IOException { @@ -183,7 +186,8 @@ public Map buildTokenizerFactories(IndexSettings index public Map buildCharFilterFactories(IndexSettings indexSettings) throws IOException { final Map charFiltersSettings = indexSettings.getSettings().getGroups(INDEX_ANALYSIS_CHAR_FILTER); - return buildMapping(Component.CHAR_FILTER, indexSettings, charFiltersSettings, charFilters, prebuiltAnalysis.preConfiguredCharFilterFactories); + return buildMapping(Component.CHAR_FILTER, indexSettings, charFiltersSettings, charFilters, + prebuiltAnalysis.preConfiguredCharFilterFactories); } public Map> buildAnalyzerFactories(IndexSettings indexSettings) throws IOException { @@ -229,14 +233,16 @@ public AnalysisProvider getTokenFilterProvider(String tokenF Settings currentSettings = tokenFilterSettings.get(tokenFilter); String typeName = currentSettings.get("type"); /* - * synonym and synonym_graph are different than everything else since they need access to the tokenizer factories for the index. - * instead of building the infrastructure for plugins we rather make it a real exception to not pollute the general interface and - * hide internal data-structures as much as possible. + * synonym and synonym_graph are different than everything else since they need access to the tokenizer factories for the + * index. instead of building the infrastructure for plugins we rather make it a real exception to not pollute the general + * interface and hide internal data-structures as much as possible. 
*/ if ("synonym".equals(typeName)) { - return requiresAnalysisSettings((is, env, name, settings) -> new SynonymTokenFilterFactory(is, env, this, name, settings)); + return requiresAnalysisSettings((is, env, name, settings) -> + new SynonymTokenFilterFactory(is, env, this, name, settings)); } else if ("synonym_graph".equals(typeName)) { - return requiresAnalysisSettings((is, env, name, settings) -> new SynonymGraphTokenFilterFactory(is, env, this, name, settings)); + return requiresAnalysisSettings((is, env, name, settings) -> + new SynonymGraphTokenFilterFactory(is, env, this, name, settings)); } else { return getAnalysisProvider(Component.FILTER, tokenFilters, tokenFilter, typeName); } @@ -325,7 +331,8 @@ private Map buildMapping(Component component, IndexSettings setti if (currentSettings.get("tokenizer") != null) { factory = (T) new CustomAnalyzerProvider(settings, name, currentSettings, environment); } else { - throw new IllegalArgumentException(component + " [" + name + "] must specify either an analyzer type, or a tokenizer"); + throw new IllegalArgumentException(component + " [" + name + "] " + + "must specify either an analyzer type, or a tokenizer"); } } else if (typeName.equals("custom")) { factory = (T) new CustomAnalyzerProvider(settings, name, currentSettings, environment); @@ -466,8 +473,8 @@ public IndexAnalyzers build(IndexSettings indexSettings, tokenFilterFactoryFactories, charFilterFactoryFactories, tokenizerFactoryFactories); } for (Map.Entry> entry : normalizerProviders.entrySet()) { - processNormalizerFactory(entry.getKey(), entry.getValue(), normalizers, - "keyword", tokenizerFactoryFactories.get("keyword"), tokenFilterFactoryFactories, charFilterFactoryFactories); + processNormalizerFactory(entry.getKey(), entry.getValue(), normalizers, "keyword", + tokenizerFactoryFactories.get("keyword"), tokenFilterFactoryFactories, charFilterFactoryFactories); processNormalizerFactory(entry.getKey(), entry.getValue(), whitespaceNormalizers, "whitespace", () -> new WhitespaceTokenizer(), tokenFilterFactoryFactories, charFilterFactoryFactories); } @@ -483,7 +490,8 @@ public IndexAnalyzers build(IndexSettings indexSettings, } if (!analyzers.containsKey("default")) { - processAnalyzerFactory(deprecationLogger, indexSettings, "default", new StandardAnalyzerProvider(indexSettings, null, "default", Settings.Builder.EMPTY_SETTINGS), + processAnalyzerFactory(deprecationLogger, indexSettings, "default", + new StandardAnalyzerProvider(indexSettings, null, "default", Settings.Builder.EMPTY_SETTINGS), analyzerAliases, analyzers, tokenFilterFactoryFactories, charFilterFactoryFactories, tokenizerFactoryFactories); } if (!analyzers.containsKey("default_search")) { @@ -501,14 +509,17 @@ public IndexAnalyzers build(IndexSettings indexSettings, if (analyzers.containsKey("default_index")) { final Version createdVersion = indexSettings.getIndexVersionCreated(); if (createdVersion.onOrAfter(Version.V_5_0_0_alpha1)) { - throw new IllegalArgumentException("setting [index.analysis.analyzer.default_index] is not supported anymore, use [index.analysis.analyzer.default] instead for index [" + index.getName() + "]"); + throw new IllegalArgumentException("setting [index.analysis.analyzer.default_index] is not supported anymore, use " + + "[index.analysis.analyzer.default] instead for index [" + index.getName() + "]"); } else { - deprecationLogger.deprecated("setting [index.analysis.analyzer.default_index] is deprecated, use [index.analysis.analyzer.default] instead for index [{}]", index.getName()); + 
deprecationLogger.deprecated("setting [index.analysis.analyzer.default_index] is deprecated, use " + + "[index.analysis.analyzer.default] instead for index [{}]", index.getName()); } } NamedAnalyzer defaultIndexAnalyzer = analyzers.containsKey("default_index") ? analyzers.get("default_index") : defaultAnalyzer; NamedAnalyzer defaultSearchAnalyzer = analyzers.containsKey("default_search") ? analyzers.get("default_search") : defaultAnalyzer; - NamedAnalyzer defaultSearchQuoteAnalyzer = analyzers.containsKey("default_search_quote") ? analyzers.get("default_search_quote") : defaultSearchAnalyzer; + NamedAnalyzer defaultSearchQuoteAnalyzer = analyzers.containsKey("default_search_quote") ? + analyzers.get("default_search_quote") : defaultSearchAnalyzer; for (Map.Entry analyzer : analyzers.entrySet()) { if (analyzer.getKey().startsWith("_")) { diff --git a/server/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java b/server/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java index a24c9aef790c9..96fe9454f638f 100644 --- a/server/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java +++ b/server/src/main/java/org/elasticsearch/index/analysis/CustomAnalyzerProvider.java @@ -55,7 +55,8 @@ public void build(final Map tokenizers, final Map charFilterNames = analyzerSettings.getAsList("char_filter"); @@ -63,7 +64,8 @@ public void build(final Map tokenizers, final Map tokenizers, final Map maxAllowedShingleDiff) { @@ -45,7 +47,8 @@ public ShingleTokenFilterFactory(IndexSettings indexSettings, Environment enviro } String tokenSeparator = settings.get("token_separator", ShingleFilter.DEFAULT_TOKEN_SEPARATOR); String fillerToken = settings.get("filler_token", ShingleFilter.DEFAULT_FILLER_TOKEN); - factory = new Factory("shingle", minShingleSize, maxShingleSize, outputUnigrams, outputUnigramsIfNoShingles, tokenSeparator, fillerToken); + factory = new Factory("shingle", minShingleSize, maxShingleSize, outputUnigrams, outputUnigramsIfNoShingles, + tokenSeparator, fillerToken); } @@ -74,10 +77,12 @@ public static final class Factory implements TokenFilterFactory { private final String name; public Factory(String name) { - this(name, ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE, ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE, true, false, ShingleFilter.DEFAULT_TOKEN_SEPARATOR, ShingleFilter.DEFAULT_FILLER_TOKEN); + this(name, ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE, ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE, true, + false, ShingleFilter.DEFAULT_TOKEN_SEPARATOR, ShingleFilter.DEFAULT_FILLER_TOKEN); } - Factory(String name, int minShingleSize, int maxShingleSize, boolean outputUnigrams, boolean outputUnigramsIfNoShingles, String tokenSeparator, String fillerToken) { + Factory(String name, int minShingleSize, int maxShingleSize, boolean outputUnigrams, boolean outputUnigramsIfNoShingles, + String tokenSeparator, String fillerToken) { this.maxShingleSize = maxShingleSize; this.outputUnigrams = outputUnigrams; this.outputUnigramsIfNoShingles = outputUnigramsIfNoShingles; diff --git a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java index a59af29036b7d..45573317ea1e0 100644 --- a/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java +++ b/server/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java @@ -70,7 +70,8 @@ * and require that it should always be around should use this cache, otherwise the * {@link 
org.elasticsearch.index.cache.query.QueryCache} should be used instead. */ -public final class BitsetFilterCache extends AbstractIndexComponent implements IndexReader.ClosedListener, RemovalListener>, Closeable { +public final class BitsetFilterCache extends AbstractIndexComponent + implements IndexReader.ClosedListener, RemovalListener>, Closeable { public static final Setting INDEX_LOAD_RANDOM_ACCESS_FILTERS_EAGERLY_SETTING = Setting.boolSetting("index.load_fixed_bitset_filters_eagerly", true, Property.IndexScope); @@ -259,10 +260,12 @@ public IndexWarmer.TerminationHandle warmReader(final IndexShard indexShard, fin final long start = System.nanoTime(); getAndLoadIfNotPresent(filterToWarm, ctx); if (indexShard.warmerService().logger().isTraceEnabled()) { - indexShard.warmerService().logger().trace("warmed bitset for [{}], took [{}]", filterToWarm, TimeValue.timeValueNanos(System.nanoTime() - start)); + indexShard.warmerService().logger().trace("warmed bitset for [{}], took [{}]", + filterToWarm, TimeValue.timeValueNanos(System.nanoTime() - start)); } } catch (Exception e) { - indexShard.warmerService().logger().warn(() -> new ParameterizedMessage("failed to load bitset for [{}]", filterToWarm), e); + indexShard.warmerService().logger().warn(() -> new ParameterizedMessage("failed to load " + + "bitset for [{}]", filterToWarm), e); } finally { latch.countDown(); } diff --git a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java index bf1e48e7a6b27..d3e4f010a3c7f 100644 --- a/server/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java +++ b/server/src/main/java/org/elasticsearch/index/codec/PerFieldMappingPostingFormatCodec.java @@ -43,7 +43,8 @@ public class PerFieldMappingPostingFormatCodec extends Lucene70Codec { private final MapperService mapperService; static { - assert Codec.forName(Lucene.LATEST_CODEC).getClass().isAssignableFrom(PerFieldMappingPostingFormatCodec.class) : "PerFieldMappingPostingFormatCodec must subclass the latest lucene codec: " + Lucene.LATEST_CODEC; + assert Codec.forName(Lucene.LATEST_CODEC).getClass().isAssignableFrom(PerFieldMappingPostingFormatCodec.class) : + "PerFieldMappingPostingFormatCodec must subclass the latest " + "lucene codec: " + Lucene.LATEST_CODEC; } public PerFieldMappingPostingFormatCodec(Lucene50StoredFieldsFormat.Mode compressionMode, MapperService mapperService, Logger logger) { diff --git a/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java b/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java index 7306b4e8cfd1b..b44446e00b6cd 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ElasticsearchConcurrentMergeScheduler.java @@ -92,7 +92,9 @@ protected void doMerge(IndexWriter writer, MergePolicy.OneMerge merge) throws IO onGoingMerges.add(onGoingMerge); if (logger.isTraceEnabled()) { - logger.trace("merge [{}] starting..., merging [{}] segments, [{}] docs, [{}] size, into [{}] estimated_size", OneMergeHelper.getSegmentName(merge), merge.segments.size(), totalNumDocs, new ByteSizeValue(totalSizeInBytes), new ByteSizeValue(merge.estimatedMergeBytes)); + logger.trace("merge [{}] starting..., merging [{}] segments, [{}] docs, [{}] size, into [{}] estimated_size", + 
OneMergeHelper.getSegmentName(merge), merge.segments.size(), totalNumDocs, new ByteSizeValue(totalSizeInBytes), + new ByteSizeValue(merge.estimatedMergeBytes)); } try { beforeMerge(onGoingMerge); @@ -123,7 +125,8 @@ protected void doMerge(IndexWriter writer, MergePolicy.OneMerge merge) throws IO totalMergeThrottledTime.inc(throttledMS); String message = String.format(Locale.ROOT, - "merge segment [%s] done: took [%s], [%,.1f MB], [%,d docs], [%s stopped], [%s throttled], [%,.1f MB written], [%,.1f MB/sec throttle]", + "merge segment [%s] done: took [%s], [%,.1f MB], [%,d docs], [%s stopped], " + + "[%s throttled], [%,.1f MB written], [%,.1f MB/sec throttle]", OneMergeHelper.getSegmentName(merge), TimeValue.timeValueMillis(tookMS), totalSizeInBytes/1024f/1024f, @@ -167,7 +170,8 @@ protected boolean maybeStall(IndexWriter writer) { @Override protected MergeThread getMergeThread(IndexWriter writer, MergePolicy.OneMerge merge) throws IOException { MergeThread thread = super.getMergeThread(writer, merge); - thread.setName(EsExecutors.threadName(indexSettings, "[" + shardId.getIndexName() + "][" + shardId.id() + "]: " + thread.getName())); + thread.setName(EsExecutors.threadName(indexSettings, "[" + shardId.getIndexName() + "][" + shardId.id() + "]: " + + thread.getName())); return thread; } diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index 546032347faad..4337864e359ef 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -128,14 +128,14 @@ public abstract class Engine implements Closeable { protected final SetOnce failedEngine = new SetOnce<>(); /* * on {@code lastWriteNanos} we use System.nanoTime() to initialize this since: - * - we use the value for figuring out if the shard / engine is active so if we startup and no write has happened yet we still consider it active - * for the duration of the configured active to inactive period. If we initialize to 0 or Long.MAX_VALUE we either immediately or never mark it - * inactive if no writes at all happen to the shard. - * - we also use this to flush big-ass merges on an inactive engine / shard but if we we initialize 0 or Long.MAX_VALUE we either immediately or never - * commit merges even though we shouldn't from a user perspective (this can also have funky sideeffects in tests when we open indices with lots of segments - * and suddenly merges kick in. - * NOTE: don't use this value for anything accurate it's a best effort for freeing up diskspace after merges and on a shard level to reduce index buffer sizes on - * inactive shards. + * - we use the value for figuring out if the shard / engine is active so if we startup and no write has happened yet we still + * consider it active for the duration of the configured active to inactive period. If we initialize to 0 or Long.MAX_VALUE we + * either immediately or never mark it inactive if no writes at all happen to the shard. + * - we also use this to flush big-ass merges on an inactive engine / shard but if we we initialize 0 or Long.MAX_VALUE we either + * immediately or never commit merges even though we shouldn't from a user perspective (this can also have funky side effects in + * tests when we open indices with lots of segments and suddenly merges kick in. 
+ * NOTE: don't use this value for anything accurate it's a best effort for freeing up diskspace after merges and on a shard level to + * reduce index buffer sizes on inactive shards. */ protected volatile long lastWriteNanos = System.nanoTime(); @@ -156,7 +156,8 @@ protected Engine(EngineConfig engineConfig) { this.shardId = engineConfig.getShardId(); this.allocationId = engineConfig.getAllocationId(); this.store = engineConfig.getStore(); - this.logger = Loggers.getLogger(Engine.class, // we use the engine class directly here to make sure all subclasses have the same logger name + // we use the engine class directly here to make sure all subclasses have the same logger name + this.logger = Loggers.getLogger(Engine.class, engineConfig.getShardId()); this.eventListener = engineConfig.getEventListener(); } @@ -291,7 +292,8 @@ public void deactivate() { assert startOfThrottleNS > 0 : "Bad state of startOfThrottleNS"; long throttleTimeNS = System.nanoTime() - startOfThrottleNS; if (throttleTimeNS >= 0) { - // Paranoia (System.nanoTime() is supposed to be monotonic): time slip may have occurred but never want to add a negative number + // Paranoia (System.nanoTime() is supposed to be monotonic): time slip may have occurred but never want + // to add a negative number throttleTimeMillisMetric.inc(TimeValue.nsecToMSec(throttleTimeNS)); } } @@ -729,12 +731,14 @@ public abstract Translog.Snapshot newChangesSnapshot(String source, MapperServic * Creates a new history snapshot for reading operations since {@code startingSeqNo} (inclusive). * The returned snapshot can be retrieved from either Lucene index or translog files. */ - public abstract Translog.Snapshot readHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException; + public abstract Translog.Snapshot readHistoryOperations(String source, + MapperService mapperService, long startingSeqNo) throws IOException; /** * Returns the estimated number of history operations whose seq# at least {@code startingSeqNo}(inclusive) in this engine. 
*/ - public abstract int estimateNumberOfHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException; + public abstract int estimateNumberOfHistoryOperations(String source, + MapperService mapperService, long startingSeqNo) throws IOException; /** * Checks if this engine has every operations since {@code startingSeqNo}(inclusive) in its history (either Lucene or translog) @@ -838,9 +842,11 @@ private ImmutableOpenMap getSegmentFileSizes(SegmentReader segment boolean useCompoundFile = segmentCommitInfo.info.getUseCompoundFile(); if (useCompoundFile) { try { - directory = engineConfig.getCodec().compoundFormat().getCompoundReader(segmentReader.directory(), segmentCommitInfo.info, IOContext.READ); + directory = engineConfig.getCodec().compoundFormat().getCompoundReader(segmentReader.directory(), + segmentCommitInfo.info, IOContext.READ); } catch (IOException e) { - logger.warn(() -> new ParameterizedMessage("Error when opening compound reader for Directory [{}] and SegmentCommitInfo [{}]", segmentReader.directory(), segmentCommitInfo), e); + logger.warn(() -> new ParameterizedMessage("Error when opening compound reader for Directory [{}] and " + + "SegmentCommitInfo [{}]", segmentReader.directory(), segmentCommitInfo), e); return ImmutableOpenMap.of(); } @@ -856,14 +862,17 @@ private ImmutableOpenMap getSegmentFileSizes(SegmentReader segment files = directory.listAll(); } catch (IOException e) { final Directory finalDirectory = directory; - logger.warn(() -> new ParameterizedMessage("Couldn't list Compound Reader Directory [{}]", finalDirectory), e); + logger.warn(() -> + new ParameterizedMessage("Couldn't list Compound Reader Directory [{}]", finalDirectory), e); return ImmutableOpenMap.of(); } } else { try { files = segmentReader.getSegmentInfo().files().toArray(new String[]{}); } catch (IOException e) { - logger.warn(() -> new ParameterizedMessage("Couldn't list Directory from SegmentReader [{}] and SegmentInfo [{}]", segmentReader, segmentReader.getSegmentInfo()), e); + logger.warn(() -> + new ParameterizedMessage("Couldn't list Directory from SegmentReader [{}] and SegmentInfo [{}]", + segmentReader, segmentReader.getSegmentInfo()), e); return ImmutableOpenMap.of(); } } @@ -876,10 +885,12 @@ private ImmutableOpenMap getSegmentFileSizes(SegmentReader segment length = directory.fileLength(file); } catch (NoSuchFileException | FileNotFoundException e) { final Directory finalDirectory = directory; - logger.warn(() -> new ParameterizedMessage("Tried to query fileLength but file is gone [{}] [{}]", finalDirectory, file), e); + logger.warn(() -> new ParameterizedMessage("Tried to query fileLength but file is gone [{}] [{}]", + finalDirectory, file), e); } catch (IOException e) { final Directory finalDirectory = directory; - logger.warn(() -> new ParameterizedMessage("Error when trying to query fileLength [{}] [{}]", finalDirectory, file), e); + logger.warn(() -> new ParameterizedMessage("Error when trying to query fileLength [{}] [{}]", + finalDirectory, file), e); } if (length == 0L) { continue; @@ -892,7 +903,8 @@ private ImmutableOpenMap getSegmentFileSizes(SegmentReader segment directory.close(); } catch (IOException e) { final Directory finalDirectory = directory; - logger.warn(() -> new ParameterizedMessage("Error when closing compound reader on Directory [{}]", finalDirectory), e); + logger.warn(() -> new ParameterizedMessage("Error when closing compound reader on Directory [{}]", + finalDirectory), e); } } @@ -1081,7 +1093,8 @@ public void 
forceMerge(boolean flush) throws IOException { /** * Triggers a forced merge on this engine */ - public abstract void forceMerge(boolean flush, int maxNumSegments, boolean onlyExpungeDeletes, boolean upgrade, boolean upgradeOnlyAncientSegments) throws EngineException, IOException; + public abstract void forceMerge(boolean flush, int maxNumSegments, boolean onlyExpungeDeletes, + boolean upgrade, boolean upgradeOnlyAncientSegments) throws EngineException, IOException; /** * Snapshots the most recent index and returns a handle to it. If needed will try and "commit" the @@ -1099,8 +1112,8 @@ public void forceMerge(boolean flush) throws IOException { /** * If the specified throwable contains a fatal error in the throwable graph, such a fatal error will be thrown. Callers should ensure * that there are no catch statements that would catch an error in the stack as the fatal error here should go uncaught and be handled - * by the uncaught exception handler that we install during bootstrap. If the specified throwable does indeed contain a fatal error, the - * specified message will attempt to be logged before throwing the fatal error. If the specified throwable does not contain a fatal + * by the uncaught exception handler that we install during bootstrap. If the specified throwable does indeed contain a fatal error, + * the specified message will attempt to be logged before throwing the fatal error. If the specified throwable does not contain a fatal * error, this method is a no-op. * * @param maybeMessage the message to maybe log @@ -1129,7 +1142,9 @@ public void failEngine(String reason, @Nullable Exception failure) { store.incRef(); try { if (failedEngine.get() != null) { - logger.warn(() -> new ParameterizedMessage("tried to fail engine but engine is already failed. ignoring. [{}]", reason), failure); + logger.warn(() -> + new ParameterizedMessage("tried to fail engine but engine is already failed. ignoring. 
[{}]", + reason), failure); return; } // this must happen before we close IW or Translog such that we can check this state to opt out of failing the engine @@ -1147,7 +1162,8 @@ public void failEngine(String reason, @Nullable Exception failure) { // the shard is initializing if (Lucene.isCorruptionException(failure)) { try { - store.markStoreCorrupted(new IOException("failed engine (reason: [" + reason + "])", ExceptionsHelper.unwrapCorruption(failure))); + store.markStoreCorrupted(new IOException("failed engine (reason: [" + reason + "])", + ExceptionsHelper.unwrapCorruption(failure))); } catch (IOException e) { logger.warn("Couldn't mark store corrupted", e); } @@ -1162,7 +1178,8 @@ public void failEngine(String reason, @Nullable Exception failure) { store.decRef(); } } else { - logger.debug(() -> new ParameterizedMessage("tried to fail engine but could not acquire lock - engine should be failed by now [{}]", reason), failure); + logger.debug(() -> new ParameterizedMessage("tried to fail engine but could not acquire lock - engine should " + + "be failed by now [{}]", reason), failure); } } @@ -1416,7 +1433,8 @@ public Delete(String type, String id, Term uid, long seqNo, long primaryTerm, lo } public Delete(String type, String id, Term uid, long primaryTerm) { - this(type, id, uid, SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm, Versions.MATCH_ANY, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime()); + this(type, id, uid, SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm, Versions.MATCH_ANY, VersionType.INTERNAL, + Origin.PRIMARY, System.nanoTime()); } public Delete(Delete template, VersionType versionType) { @@ -1618,7 +1636,9 @@ public void flushAndClose() throws IOException { try { logger.debug("flushing shard on close - this might take some time to sync files to disk"); try { - flush(); // TODO we might force a flush in the future since we have the write lock already even though recoveries are running. + // TODO we might force a flush in the future since we have the write lock already even though recoveries + // are running. + flush(); } catch (AlreadyClosedException ex) { logger.debug("engine already closed - skipping flushAndClose"); } @@ -1755,7 +1775,8 @@ public interface Warmer { } /** - * Request that this engine throttle incoming indexing requests to one thread. Must be matched by a later call to {@link #deactivateThrottling()}. + * Request that this engine throttle incoming indexing requests to one thread. + * Must be matched by a later call to {@link #deactivateThrottling()}. */ public abstract void activateThrottling(); diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 5438f94f2620c..ea2f90ee458d0 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -262,7 +262,8 @@ private static LocalCheckpointTracker createLocalCheckpointTracker(EngineConfig // disable the MSU optimization during recovery. Here we prefer to maintain the consistency of LocalCheckpointTracker. 
if (localCheckpoint < maxSeqNo && engineConfig.getIndexSettings().isSoftDeleteEnabled()) { try (Searcher searcher = searcherSupplier.get()) { - Lucene.scanSeqNosInReader(searcher.getDirectoryReader(), localCheckpoint + 1, maxSeqNo, tracker::markSeqNoAsCompleted); + Lucene.scanSeqNosInReader(searcher.getDirectoryReader(), localCheckpoint + 1, maxSeqNo, + tracker::markSeqNoAsCompleted); } } return tracker; @@ -450,7 +451,8 @@ private void recoverFromTranslogInternal(TranslogRecoveryRunner translogRecovery pendingTranslogRecovery.set(false); // we are good - now we can commit if (opsRecovered > 0) { logger.trace("flushing post recovery from translog. ops recovered [{}]. committed translog id [{}]. current id [{}]", - opsRecovered, translogGeneration == null ? null : translogGeneration.translogFileGeneration, translog.currentFileGeneration()); + opsRecovered, translogGeneration == null ? null : + translogGeneration.translogFileGeneration, translog.currentFileGeneration()); commitIndexWriter(indexWriter, translog, null); refreshLastCommittedSegmentInfos(); refresh("translog_recovery"); @@ -458,7 +460,9 @@ private void recoverFromTranslogInternal(TranslogRecoveryRunner translogRecovery translog.trimUnreferencedReaders(); } - private Translog openTranslog(EngineConfig engineConfig, TranslogDeletionPolicy translogDeletionPolicy, LongSupplier globalCheckpointSupplier) throws IOException { + private Translog openTranslog(EngineConfig engineConfig, TranslogDeletionPolicy translogDeletionPolicy, + LongSupplier globalCheckpointSupplier) throws IOException { + final TranslogConfig translogConfig = engineConfig.getTranslogConfig(); final String translogUUID = loadTranslogUUIDFromLastCommit(); // A translog checkpoint from 5.x index does not have translog_generation_key and Translog's ctor will read translog gen values @@ -470,7 +474,8 @@ private Translog openTranslog(EngineConfig engineConfig, TranslogDeletionPolicy translogDeletionPolicy.setMinTranslogGenerationForRecovery(minRequiredTranslogGen); } // We expect that this shard already exists, so it must already have an existing translog else something is badly wrong! 
- return new Translog(translogConfig, translogUUID, translogDeletionPolicy, globalCheckpointSupplier, engineConfig.getPrimaryTermSupplier()); + return new Translog(translogConfig, translogUUID, translogDeletionPolicy, globalCheckpointSupplier, + engineConfig.getPrimaryTermSupplier()); } // Package private for testing purposes only @@ -518,7 +523,8 @@ public Translog.Snapshot readHistoryOperations(String source, MapperService mapp @Override public int estimateNumberOfHistoryOperations(String source, MapperService mapperService, long startingSeqNo) throws IOException { if (engineConfig.getIndexSettings().isSoftDeleteEnabled()) { - try (Translog.Snapshot snapshot = newChangesSnapshot(source, mapperService, Math.max(0, startingSeqNo), Long.MAX_VALUE, false)) { + try (Translog.Snapshot snapshot = newChangesSnapshot(source, mapperService, Math.max(0, startingSeqNo), + Long.MAX_VALUE, false)) { return snapshot.totalOperations(); } } else { @@ -637,7 +643,8 @@ public GetResult get(Get get, BiFunction search TranslogLeafReader reader = new TranslogLeafReader((Translog.Index) operation, engineConfig .getIndexSettings().getIndexVersionCreated()); return new GetResult(new Searcher("realtime_get", new IndexSearcher(reader), reader::close), - new VersionsAndSeqNoResolver.DocIdAndVersion(0, ((Translog.Index) operation).version(), reader, 0)); + new VersionsAndSeqNoResolver.DocIdAndVersion(0, + ((Translog.Index) operation).version(), reader, 0)); } } catch (IOException e) { maybeFailEngine("realtime_get", e); // lets check if the translog has failed with a tragic event @@ -803,7 +810,8 @@ private boolean assertVersionType(final Engine.Operation operation) { } private boolean assertIncomingSequenceNumber(final Engine.Operation.Origin origin, final long seqNo) { - if (engineConfig.getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_alpha1) && origin == Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { + if (engineConfig.getIndexSettings().getIndexVersionCreated().before(Version.V_6_0_0_alpha1) && + origin == Operation.Origin.LOCAL_TRANSLOG_RECOVERY) { // legacy support assert seqNo == SequenceNumbers.UNASSIGNED_SEQ_NO : "old op recovering but it already has a seq no.;" + " index version: " + engineConfig.getIndexSettings().getIndexVersionCreated() + ", seqNo: " + seqNo; @@ -872,8 +880,8 @@ public IndexResult index(Index index) throws IOException { * - is preserved in the transaction log * - and is assigned before we start to index / replicate * NOTE: it's not important for this timestamp to be consistent across nodes etc. it's just a number that is in the common - * case increasing and can be used in the failure case when we retry and resent documents to establish a happens before relationship. - * for instance: + * case increasing and can be used in the failure case when we retry and resent documents to establish a happens before + * relationship. For instance: * - doc A has autoGeneratedIdTimestamp = 10, isRetry = false * - doc B has autoGeneratedIdTimestamp = 9, isRetry = false * @@ -881,11 +889,12 @@ public IndexResult index(Index index) throws IOException { * - now doc A' has autoGeneratedIdTimestamp = 10, isRetry = true * * if A' arrives on the shard first we update maxUnsafeAutoIdTimestamp to 10 and use update document. All subsequent - * documents that arrive (A and B) will also use updateDocument since their timestamps are less than maxUnsafeAutoIdTimestamp. 
- * While this is not strictly needed for doc B it is just much simpler to implement since it will just de-optimize some doc in the worst case. + * documents that arrive (A and B) will also use updateDocument since their timestamps are less than + * maxUnsafeAutoIdTimestamp. While this is not strictly needed for doc B it is just much simpler to implement since it + * will just de-optimize some doc in the worst case. * - * if A arrives on the shard first we use addDocument since maxUnsafeAutoIdTimestamp is < 10. A` will then just be skipped or calls - * updateDocument. + * if A arrives on the shard first we use addDocument since maxUnsafeAutoIdTimestamp is < 10. A` will then just be skipped + * or calls updateDocument. */ final IndexingStrategy plan = indexingStrategyForOperation(index); @@ -1169,14 +1178,16 @@ private IndexingStrategy(boolean currentNotFoundOrDeleted, boolean useLuceneUpda } public static IndexingStrategy optimizedAppendOnly(long seqNoForIndexing, long versionForIndexing) { - return new IndexingStrategy(true, false, true, false, seqNoForIndexing, versionForIndexing, null); + return new IndexingStrategy(true, false, true, + false, seqNoForIndexing, versionForIndexing, null); } public static IndexingStrategy skipDueToVersionConflict( VersionConflictEngineException e, boolean currentNotFoundOrDeleted, long currentVersion, long term) { final IndexResult result = new IndexResult(e, currentVersion, term); return new IndexingStrategy( - currentNotFoundOrDeleted, false, false, false, SequenceNumbers.UNASSIGNED_SEQ_NO, Versions.NOT_FOUND, result); + currentNotFoundOrDeleted, false, false, false, + SequenceNumbers.UNASSIGNED_SEQ_NO, Versions.NOT_FOUND, result); } static IndexingStrategy processNormally(boolean currentNotFoundOrDeleted, @@ -1187,16 +1198,19 @@ static IndexingStrategy processNormally(boolean currentNotFoundOrDeleted, static IndexingStrategy overrideExistingAsIfNotThere( long seqNoForIndexing, long versionForIndexing) { - return new IndexingStrategy(true, true, true, false, seqNoForIndexing, versionForIndexing, null); + return new IndexingStrategy(true, true, true, + false, seqNoForIndexing, versionForIndexing, null); } public static IndexingStrategy processButSkipLucene(boolean currentNotFoundOrDeleted, long seqNoForIndexing, long versionForIndexing) { - return new IndexingStrategy(currentNotFoundOrDeleted, false, false, false, seqNoForIndexing, versionForIndexing, null); + return new IndexingStrategy(currentNotFoundOrDeleted, false, false, + false, seqNoForIndexing, versionForIndexing, null); } static IndexingStrategy processAsStaleOp(boolean addStaleOpToLucene, long seqNoForIndexing, long versionForIndexing) { - return new IndexingStrategy(false, false, false, addStaleOpToLucene, seqNoForIndexing, versionForIndexing, null); + return new IndexingStrategy(false, false, false, + addStaleOpToLucene, seqNoForIndexing, versionForIndexing, null); } } @@ -1209,13 +1223,15 @@ private boolean assertDocDoesNotExist(final Index index, final boolean allowDele final VersionValue versionValue = versionMap.getVersionForAssert(index.uid().bytes()); if (versionValue != null) { if (versionValue.isDelete() == false || allowDeleted == false) { - throw new AssertionError("doc [" + index.type() + "][" + index.id() + "] exists in version map (version " + versionValue + ")"); + throw new AssertionError("doc [" + index.type() + "][" + index.id() + "] exists in version map (version " + + versionValue + ")"); } } else { try (Searcher searcher = acquireSearcher("assert doc doesn't exist", 
SearcherScope.INTERNAL)) { final long docsWithId = searcher.searcher().count(new TermQuery(index.uid())); if (docsWithId > 0) { - throw new AssertionError("doc [" + index.type() + "][" + index.id() + "] exists [" + docsWithId + "] times in index"); + throw new AssertionError("doc [" + index.type() + "][" + index.id() + "] exists [" + docsWithId + + "] times in index"); } } } @@ -1447,22 +1463,26 @@ public static DeletionStrategy skipDueToVersionConflict( VersionConflictEngineException e, long currentVersion, long term, boolean currentlyDeleted) { final long unassignedSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; final DeleteResult deleteResult = new DeleteResult(e, currentVersion, term, unassignedSeqNo, currentlyDeleted == false); - return new DeletionStrategy(false, false, currentlyDeleted, unassignedSeqNo, Versions.NOT_FOUND, deleteResult); + return new DeletionStrategy(false, false, currentlyDeleted, unassignedSeqNo, + Versions.NOT_FOUND, deleteResult); } static DeletionStrategy processNormally(boolean currentlyDeleted, long seqNoOfDeletion, long versionOfDeletion) { - return new DeletionStrategy(true, false, currentlyDeleted, seqNoOfDeletion, versionOfDeletion, null); + return new DeletionStrategy(true, false, currentlyDeleted, seqNoOfDeletion, + versionOfDeletion, null); } public static DeletionStrategy processButSkipLucene(boolean currentlyDeleted, long seqNoOfDeletion, long versionOfDeletion) { - return new DeletionStrategy(false, false, currentlyDeleted, seqNoOfDeletion, versionOfDeletion, null); + return new DeletionStrategy(false, false, currentlyDeleted, seqNoOfDeletion, + versionOfDeletion, null); } static DeletionStrategy processAsStaleOp(boolean addStaleOpToLucene, boolean currentlyDeleted, long seqNoOfDeletion, long versionOfDeletion) { - return new DeletionStrategy(false, addStaleOpToLucene, currentlyDeleted, seqNoOfDeletion, versionOfDeletion, null); + return new DeletionStrategy(false, addStaleOpToLucene, currentlyDeleted, seqNoOfDeletion, + versionOfDeletion, null); } } @@ -1470,7 +1490,8 @@ static DeletionStrategy processAsStaleOp(boolean addStaleOpToLucene, boolean cur public void maybePruneDeletes() { // It's expensive to prune because we walk the deletes map acquiring dirtyLock for each uid so we only do it // every 1/4 of gcDeletesInMillis: - if (engineConfig.isEnableGcDeletes() && engineConfig.getThreadPool().relativeTimeInMillis() - lastDeleteVersionPruneTimeMSec > getGcDeletesInMillis() * 0.25) { + if (engineConfig.isEnableGcDeletes() && + engineConfig.getThreadPool().relativeTimeInMillis() - lastDeleteVersionPruneTimeMSec > getGcDeletesInMillis() * 0.25) { pruneDeletedTombstones(); } } @@ -1501,8 +1522,9 @@ private NoOpResult innerNoOp(final NoOp noOp) throws IOException { try { final ParsedDocument tombstone = engineConfig.getTombstoneDocSupplier().newNoopTombstoneDoc(noOp.reason()); tombstone.updateSeqID(noOp.seqNo(), noOp.primaryTerm()); - // A noop tombstone does not require a _version but it's added to have a fully dense docvalues for the version field. - // 1L is selected to optimize the compression because it might probably be the most common value in version field. + // A noop tombstone does not require a _version but it's added to have a fully dense docvalues for the version + // field. 1L is selected to optimize the compression because it might probably be the most common value in + // version field. 
tombstone.version().setLongValue(1L); assert tombstone.docs().size() == 1 : "Tombstone should have a single doc [" + tombstone + "]"; final ParseContext.Document doc = tombstone.docs().get(0); @@ -1665,7 +1687,8 @@ final boolean tryRenewSyncCommit() { @Override public boolean shouldPeriodicallyFlush() { ensureOpen(); - final long translogGenerationOfLastCommit = Long.parseLong(lastCommittedSegmentInfos.userData.get(Translog.TRANSLOG_GENERATION_KEY)); + final long translogGenerationOfLastCommit = + Long.parseLong(lastCommittedSegmentInfos.userData.get(Translog.TRANSLOG_GENERATION_KEY)); final long flushThreshold = config().getIndexSettings().getFlushThresholdSize().getBytes(); if (translog.sizeInBytesByMinGen(translogGenerationOfLastCommit) < flushThreshold) { return false; @@ -1891,7 +1914,8 @@ public void forceMerge(final boolean flush, int maxNumSegments, boolean onlyExpu * thread for optimize, and the 'optimizeLock' guarding this code, and (3) ConcurrentMergeScheduler * syncs calls to findForcedMerges. */ - assert indexWriter.getConfig().getMergePolicy() instanceof ElasticsearchMergePolicy : "MergePolicy is " + indexWriter.getConfig().getMergePolicy().getClass().getName(); + assert indexWriter.getConfig().getMergePolicy() instanceof ElasticsearchMergePolicy : "MergePolicy is " + + indexWriter.getConfig().getMergePolicy().getClass().getName(); ElasticsearchMergePolicy mp = (ElasticsearchMergePolicy) indexWriter.getConfig().getMergePolicy(); optimizeLock.lock(); try { @@ -1939,7 +1963,8 @@ public void forceMerge(final boolean flush, int maxNumSegments, boolean onlyExpu throw e; } finally { try { - mp.setUpgradeInProgress(false, false); // reset it just to make sure we reset it in a case of an error + // reset it just to make sure we reset it in a case of an error + mp.setUpgradeInProgress(false, false); } finally { optimizeLock.unlock(); } @@ -2071,7 +2096,8 @@ public List segments(boolean verbose) { @Override protected final void closeNoLock(String reason, CountDownLatch closedLatch) { if (isClosed.compareAndSet(false, true)) { - assert rwl.isWriteLockedByCurrentThread() || failEngineLock.isHeldByCurrentThread() : "Either the write lock must be held or the engine must be currently be failing itself"; + assert rwl.isWriteLockedByCurrentThread() || failEngineLock.isHeldByCurrentThread() : + "Either the write lock must be held or the engine must be currently be failing itself"; try { this.versionMap.clear(); if (internalSearcherManager != null) { @@ -2203,7 +2229,8 @@ public IndexSearcher newSearcher(IndexReader reader, IndexReader previousReader) } if (warmer != null) { try { - assert searcher.getIndexReader() instanceof ElasticsearchDirectoryReader : "this class needs an ElasticsearchDirectoryReader but got: " + searcher.getIndexReader().getClass(); + assert searcher.getIndexReader() instanceof ElasticsearchDirectoryReader : + "this class needs an ElasticsearchDirectoryReader but got: " + searcher.getIndexReader().getClass(); warmer.warm(new Searcher("top_reader_warming", searcher, () -> {})); } catch (Exception e) { if (isEngineClosed.get() == false) { @@ -2275,11 +2302,13 @@ public synchronized void afterMerge(OnGoingMerge merge) { int maxNumMerges = mergeScheduler.getMaxMergeCount(); if (numMergesInFlight.decrementAndGet() < maxNumMerges) { if (isThrottling.getAndSet(false)) { - logger.info("stop throttling indexing: numMergesInFlight={}, maxNumMerges={}", numMergesInFlight, maxNumMerges); + logger.info("stop throttling indexing: numMergesInFlight={}, maxNumMerges={}", + 
numMergesInFlight, maxNumMerges); deactivateThrottling(); } } - if (indexWriter.hasPendingMerges() == false && System.nanoTime() - lastWriteNanos >= engineConfig.getFlushMergesAfter().nanos()) { + if (indexWriter.hasPendingMerges() == false && + System.nanoTime() - lastWriteNanos >= engineConfig.getFlushMergesAfter().nanos()) { // NEVER do this on a merge thread since we acquire some locks blocking here and if we concurrently rollback the writer // we deadlock on engine#close for instance. engineConfig.getThreadPool().executor(ThreadPool.Names.FLUSH).execute(new AbstractRunnable() { @@ -2631,7 +2660,8 @@ public long softUpdateDocument(Term term, Iterable doc return super.softUpdateDocument(term, doc, softDeletes); } @Override - public long softUpdateDocuments(Term term, Iterable> docs, Field... softDeletes) throws IOException { + public long softUpdateDocuments(Term term, Iterable> docs, + Field... softDeletes) throws IOException { assert softDeleteEnabled : "Call #softUpdateDocuments but soft-deletes is disabled"; return super.softUpdateDocuments(term, docs, softDeletes); } diff --git a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java index 6d6340dd337af..e4dce8919cf1e 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java +++ b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java @@ -40,8 +40,9 @@ final class LiveVersionMap implements ReferenceManager.RefreshListener, Accounta private static final class VersionLookup { - /** Tracks bytes used by current map, i.e. what is freed on refresh. For deletes, which are also added to tombstones, we only account - * for the CHM entry here, and account for BytesRef/VersionValue against the tombstones, since refresh would not clear this RAM. */ + /** Tracks bytes used by current map, i.e. what is freed on refresh. For deletes, which are also added to tombstones, + * we only account for the CHM entry here, and account for BytesRef/VersionValue against the tombstones, since refresh would not + * clear this RAM. */ final AtomicLong ramBytesUsed = new AtomicLong(); private static final VersionLookup EMPTY = new VersionLookup(Collections.emptyMap()); @@ -123,7 +124,8 @@ private static final class Maps { } Maps() { - this(new VersionLookup(ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency()), VersionLookup.EMPTY, false); + this(new VersionLookup(ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency()), + VersionLookup.EMPTY, false); } boolean isSafeAccessMode() { @@ -252,8 +254,8 @@ public void afterRefresh(boolean didRefresh) throws IOException { // means Lucene did not actually open a new reader because it detected no changes, it's possible old has some entries in it, which // is fine: it means they were actually already included in the previously opened reader, so we can still safely drop them in that // case. This is because we assign new maps (in beforeRefresh) slightly before Lucene actually flushes any segments for the - // reopen, and so any concurrent indexing requests can still sneak in a few additions to that current map that are in fact reflected - // in the previous reader. We don't touch tombstones here: they expire on their own index.gc_deletes timeframe: + // reopen, and so any concurrent indexing requests can still sneak in a few additions to that current map that are in fact + // reflected in the previous reader. 
We don't touch tombstones here: they expire on their own index.gc_deletes timeframe: maps = maps.invalidateOldMap(); assert (unsafeKeysMap = unsafeKeysMap.invalidateOldMap()) != null; @@ -416,8 +418,8 @@ synchronized void clear() { maps = new Maps(); tombstones.clear(); // NOTE: we can't zero this here, because a refresh thread could be calling InternalEngine.pruneDeletedTombstones at the same time, - // and this will lead to an assert trip. Presumably it's fine if our ramBytesUsedTombstones is non-zero after clear since the index - // is being closed: + // and this will lead to an assert trip. Presumably it's fine if our ramBytesUsedTombstones is non-zero after clear since the + // index is being closed: //ramBytesUsedTombstones.set(0); } @@ -455,7 +457,8 @@ Map getAllCurrent() { return maps.current.map; } - /** Iterates over all deleted versions, including new ones (not yet exposed via reader) and old ones (exposed via reader but not yet GC'd). */ + /** Iterates over all deleted versions, including new ones (not yet exposed via reader) and old ones + * (exposed via reader but not yet GC'd). */ Map getAllTombstones() { return tombstones; } @@ -471,7 +474,8 @@ Releasable acquireLock(BytesRef uid) { } boolean assertKeyedLockHeldByCurrentThread(BytesRef uid) { - assert keyedLock.isHeldByCurrentThread(uid) : "Thread [" + Thread.currentThread().getName() + "], uid [" + uid.utf8ToString() + "]"; + assert keyedLock.isHeldByCurrentThread(uid) : "Thread [" + Thread.currentThread().getName() + + "], uid [" + uid.utf8ToString() + "]"; return true; } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java index fc4b0632c8076..0ebd848859cf3 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java @@ -20,6 +20,7 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.index.SegmentInfos; @@ -66,7 +67,7 @@ public class ReadOnlyEngine extends Engine { private final IndexCommit indexCommit; private final Lock indexWriterLock; private final DocsStats docsStats; - protected final RamAccountingSearcherFactory searcherFactory; + private final RamAccountingSearcherFactory searcherFactory; /** * Creates a new ReadOnlyEngine. 
This ctor can also be used to open a read-only engine on top of an already opened @@ -414,4 +415,8 @@ public void updateMaxUnsafeAutoIdTimestamp(long newTimestamp) { public void initializeMaxSeqNoOfUpdatesOrDeletes() { advanceMaxSeqNoOfUpdatesOrDeletes(seqNoStats.getMaxSeqNo()); } + + protected void processReaders(IndexReader reader, IndexReader previousReader) { + searcherFactory.processReaders(reader, previousReader); + } } diff --git a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java index 97efd9466fbae..87353552c5293 100644 --- a/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java +++ b/server/src/main/java/org/elasticsearch/index/get/ShardGetService.java @@ -65,17 +65,19 @@ public final class ShardGetService extends AbstractIndexShardComponent { private final IndexShard indexShard; public ShardGetService(IndexSettings indexSettings, IndexShard indexShard, - MapperService mapperService) { + MapperService mapperService) { super(indexShard.shardId(), indexSettings); this.mapperService = mapperService; this.indexShard = indexShard; } public GetStats stats() { - return new GetStats(existsMetric.count(), TimeUnit.NANOSECONDS.toMillis(existsMetric.sum()), missingMetric.count(), TimeUnit.NANOSECONDS.toMillis(missingMetric.sum()), currentMetric.count()); + return new GetStats(existsMetric.count(), TimeUnit.NANOSECONDS.toMillis(existsMetric.sum()), + missingMetric.count(), TimeUnit.NANOSECONDS.toMillis(missingMetric.sum()), currentMetric.count()); } - public GetResult get(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext) { + public GetResult get(String type, String id, String[] gFields, boolean realtime, long version, + VersionType versionType, FetchSourceContext fetchSourceContext) { return get(type, id, gFields, realtime, version, versionType, fetchSourceContext, false); } @@ -109,7 +111,8 @@ public GetResult getForUpdate(String type, String id, long version, VersionType *

* Note: Call must release engine searcher associated with engineGetResult! */ - public GetResult get(Engine.GetResult engineGetResult, String id, String type, String[] fields, FetchSourceContext fetchSourceContext) { + public GetResult get(Engine.GetResult engineGetResult, String id, String type, + String[] fields, FetchSourceContext fetchSourceContext) { if (!engineGetResult.exists()) { return new GetResult(shardId.getIndexName(), type, id, -1, false, null, null); } @@ -185,7 +188,8 @@ private GetResult innerGet(String type, String id, String[] gFields, boolean rea } } - private GetResult innerGetLoadFromStoredFields(String type, String id, String[] gFields, FetchSourceContext fetchSourceContext, Engine.GetResult get, MapperService mapperService) { + private GetResult innerGetLoadFromStoredFields(String type, String id, String[] gFields, FetchSourceContext fetchSourceContext, + Engine.GetResult get, MapperService mapperService) { Map fields = null; BytesReference source = null; DocIdAndVersion docIdAndVersion = get.docIdAndVersion(); @@ -209,7 +213,8 @@ private GetResult innerGetLoadFromStoredFields(String type, String id, String[] DocumentMapper docMapper = mapperService.documentMapper(type); if (docMapper.parentFieldMapper().active()) { - String parentId = ParentFieldSubFetchPhase.getParentId(docMapper.parentFieldMapper(), docIdAndVersion.reader, docIdAndVersion.docId); + String parentId = ParentFieldSubFetchPhase.getParentId(docMapper.parentFieldMapper(), + docIdAndVersion.reader, docIdAndVersion.docId); if (fields == null) { fields = new HashMap<>(1); } diff --git a/server/src/main/java/org/elasticsearch/index/merge/MergeStats.java b/server/src/main/java/org/elasticsearch/index/merge/MergeStats.java index 603d5c304b634..030ae18dfc401 100644 --- a/server/src/main/java/org/elasticsearch/index/merge/MergeStats.java +++ b/server/src/main/java/org/elasticsearch/index/merge/MergeStats.java @@ -51,8 +51,9 @@ public MergeStats() { } - public void add(long totalMerges, long totalMergeTime, long totalNumDocs, long totalSizeInBytes, long currentMerges, long currentNumDocs, long currentSizeInBytes, - long stoppedTimeMillis, long throttledTimeMillis, double mbPerSecAutoThrottle) { + public void add(long totalMerges, long totalMergeTime, long totalNumDocs, long totalSizeInBytes, + long currentMerges, long currentNumDocs, long currentSizeInBytes, + long stoppedTimeMillis, long throttledTimeMillis, double mbPerSecAutoThrottle) { this.total += totalMerges; this.totalTimeInMillis += totalMergeTime; this.totalNumDocs += totalNumDocs; diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryBuilders.java b/server/src/main/java/org/elasticsearch/index/query/QueryBuilders.java index 43fd89b8be335..61eaa011f6587 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryBuilders.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryBuilders.java @@ -401,7 +401,8 @@ public static FunctionScoreQueryBuilder functionScoreQuery(QueryBuilder queryBui * @param filterFunctionBuilders the filters and functions to execute * @return the function score query */ - public static FunctionScoreQueryBuilder functionScoreQuery(QueryBuilder queryBuilder, FunctionScoreQueryBuilder.FilterFunctionBuilder[] filterFunctionBuilders) { + public static FunctionScoreQueryBuilder functionScoreQuery(QueryBuilder queryBuilder, + FunctionScoreQueryBuilder.FilterFunctionBuilder[] filterFunctionBuilders) { return new FunctionScoreQueryBuilder(queryBuilder, filterFunctionBuilders); } diff --git 
a/server/src/main/java/org/elasticsearch/index/query/QueryValidationException.java b/server/src/main/java/org/elasticsearch/index/query/QueryValidationException.java index 9e0ee2a1c33e5..10fb77d531155 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryValidationException.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryValidationException.java @@ -37,7 +37,8 @@ public class QueryValidationException extends ValidationException { * @param validationException an initial exception. Can be {@code null}, in which case a new exception is created. * @return a {@link QueryValidationException} with added validation error message */ - public static QueryValidationException addValidationError(String queryId, String validationError, QueryValidationException validationException) { + public static QueryValidationException addValidationError(String queryId, String validationError, + QueryValidationException validationException) { if (validationException == null) { validationException = new QueryValidationException(); } @@ -52,7 +53,8 @@ public static QueryValidationException addValidationError(String queryId, String * @param validationException an initial exception. Can be {@code null}, in which case a new exception is created. * @return a {@link QueryValidationException} with added validation error message */ - public static QueryValidationException addValidationErrors(List validationErrors, QueryValidationException validationException) { + public static QueryValidationException addValidationErrors(List validationErrors, + QueryValidationException validationException) { if (validationException == null) { validationException = new QueryValidationException(); } diff --git a/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java b/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java index bcc02496f9c7a..98f3a16868e6b 100644 --- a/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java +++ b/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java @@ -408,6 +408,7 @@ private void checkForPositions(String field) { * Checks if graph analysis should be enabled for the field depending * on the provided {@link Analyzer} */ + @Override protected Query createFieldQuery(Analyzer analyzer, BooleanClause.Occur operator, String field, String queryText, boolean quoted, int phraseSlop) { assert operator == BooleanClause.Occur.SHOULD || operator == BooleanClause.Occur.MUST; diff --git a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java index 92c1d1366eb72..f335134d49835 100644 --- a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java +++ b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java @@ -56,16 +56,19 @@ public MultiMatchQuery(QueryShardContext context) { super(context); } - private Query parseAndApply(Type type, String fieldName, Object value, String minimumShouldMatch, Float boostValue) throws IOException { + private Query parseAndApply(Type type, String fieldName, Object value, + String minimumShouldMatch, Float boostValue) throws IOException { Query query = parse(type, fieldName, value); query = Queries.maybeApplyMinimumShouldMatch(query, minimumShouldMatch); - if (query != null && boostValue != null && boostValue != AbstractQueryBuilder.DEFAULT_BOOST && query instanceof MatchNoDocsQuery == false) { + if (query != null && boostValue != null && + boostValue != 
AbstractQueryBuilder.DEFAULT_BOOST && query instanceof MatchNoDocsQuery == false) { query = new BoostQuery(query, boostValue); } return query; } - public Query parse(MultiMatchQueryBuilder.Type type, Map fieldNames, Object value, String minimumShouldMatch) throws IOException { + public Query parse(MultiMatchQueryBuilder.Type type, Map fieldNames, + Object value, String minimumShouldMatch) throws IOException { final Query result; // reset query builder queryBuilder = null; @@ -103,7 +106,8 @@ public QueryBuilder(float tieBreaker) { this.tieBreaker = tieBreaker; } - public List buildGroupedQueries(MultiMatchQueryBuilder.Type type, Map fieldNames, Object value, String minimumShouldMatch) throws IOException{ + public List buildGroupedQueries(MultiMatchQueryBuilder.Type type, Map fieldNames, + Object value, String minimumShouldMatch) throws IOException{ List queries = new ArrayList<>(); for (String fieldName : fieldNames.keySet()) { Float boostValue = fieldNames.get(fieldName); @@ -115,7 +119,10 @@ public List buildGroupedQueries(MultiMatchQueryBuilder.Type type, Map buildGroupedQueries(MultiMatchQueryBuilder.Type type, Map fieldNames, Object value, String minimumShouldMatch) throws IOException { + public List buildGroupedQueries(MultiMatchQueryBuilder.Type type, Map fieldNames, + Object value, String minimumShouldMatch) throws IOException { Map> groups = new HashMap<>(); List queries = new ArrayList<>(); for (Map.Entry entry : fieldNames.entrySet()) { diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexEventListener.java b/server/src/main/java/org/elasticsearch/index/shard/IndexEventListener.java index 24ed98e9affe3..c0a89e7cf006c 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexEventListener.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexEventListener.java @@ -81,7 +81,8 @@ default void afterIndexShardClosed(ShardId shardId, @Nullable IndexShard indexSh * @param currentState the new shard state * @param reason the reason for the state change if there is one, null otherwise */ - default void indexShardStateChanged(IndexShard indexShard, @Nullable IndexShardState previousState, IndexShardState currentState, @Nullable String reason) {} + default void indexShardStateChanged(IndexShard indexShard, @Nullable IndexShardState previousState, + IndexShardState currentState, @Nullable String reason) {} /** * Called when a shard is marked as inactive diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexSearcherWrapper.java b/server/src/main/java/org/elasticsearch/index/shard/IndexSearcherWrapper.java index 9ecdb6fbd6e05..95c1ecdd2ff31 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexSearcherWrapper.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexSearcherWrapper.java @@ -68,7 +68,8 @@ protected IndexSearcher wrap(IndexSearcher searcher) throws IOException { * This is invoked each time a {@link Engine.Searcher} is requested to do an operation. 
(for example search) */ public final Engine.Searcher wrap(Engine.Searcher engineSearcher) throws IOException { - final ElasticsearchDirectoryReader elasticsearchDirectoryReader = ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(engineSearcher.getDirectoryReader()); + final ElasticsearchDirectoryReader elasticsearchDirectoryReader = + ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(engineSearcher.getDirectoryReader()); if (elasticsearchDirectoryReader == null) { throw new IllegalStateException("Can't wrap non elasticsearch directory reader"); } @@ -76,8 +77,9 @@ public final Engine.Searcher wrap(Engine.Searcher engineSearcher) throws IOExcep DirectoryReader reader = wrap(nonClosingReaderWrapper); if (reader != nonClosingReaderWrapper) { if (reader.getReaderCacheHelper() != elasticsearchDirectoryReader.getReaderCacheHelper()) { - throw new IllegalStateException("wrapped directory reader doesn't delegate IndexReader#getCoreCacheKey, wrappers must override this method and delegate" + - " to the original readers core cache key. Wrapped readers can't be used as cache keys since their are used only per request which would lead to subtle bugs"); + throw new IllegalStateException("wrapped directory reader doesn't delegate IndexReader#getCoreCacheKey," + + " wrappers must override this method and delegate to the original readers core cache key. Wrapped readers can't be " + + "used as cache keys since their are used only per request which would lead to subtle bugs"); } if (ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(reader) != elasticsearchDirectoryReader) { // prevent that somebody wraps with a non-filter reader diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 13045e422c44a..2c6dc30958a7b 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -223,9 +223,11 @@ Runnable getGlobalCheckpointSyncer() { // for primaries, we only allow to write when actually started (so the cluster has decided we started) // in case we have a relocation of a primary, we also allow to write after phase 2 completed, where the shard may be // in state RECOVERING or POST_RECOVERY. 
- // for replicas, replication is also allowed while recovering, since we index also during recovery to replicas and rely on version checks to make sure its consistent - // a relocated shard can also be target of a replication if the relocation target has not been marked as active yet and is syncing it's changes back to the relocation source - private static final EnumSet writeAllowedStates = EnumSet.of(IndexShardState.RECOVERING, IndexShardState.POST_RECOVERY, IndexShardState.STARTED); + // for replicas, replication is also allowed while recovering, since we index also during recovery to replicas and rely on + // version checks to make sure its consistent a relocated shard can also be target of a replication if the relocation target has not + // been marked as active yet and is syncing it's changes back to the relocation source + private static final EnumSet writeAllowedStates = EnumSet.of(IndexShardState.RECOVERING, + IndexShardState.POST_RECOVERY, IndexShardState.STARTED); private final IndexSearcherWrapper searcherWrapper; @@ -413,10 +415,12 @@ public void updateShardState(final ShardRouting newRouting, currentRouting = this.shardRouting; if (!newRouting.shardId().equals(shardId())) { - throw new IllegalArgumentException("Trying to set a routing entry with shardId " + newRouting.shardId() + " on a shard with shardId " + shardId()); + throw new IllegalArgumentException("Trying to set a routing entry with shardId " + + newRouting.shardId() + " on a shard with shardId " + shardId()); } if ((currentRouting == null || newRouting.isSameAllocation(currentRouting)) == false) { - throw new IllegalArgumentException("Trying to set a routing entry with a different allocation. Current " + currentRouting + ", new " + newRouting); + throw new IllegalArgumentException("Trying to set a routing entry with a different allocation. Current " + + currentRouting + ", new " + newRouting); } if (currentRouting != null && currentRouting.primary() && newRouting.primary() == false) { throw new IllegalArgumentException("illegal state: trying to move shard from primary mode to replica mode. Current " @@ -438,10 +442,11 @@ public void updateShardState(final ShardRouting newRouting, changeState(IndexShardState.STARTED, "global state is [" + newRouting.state() + "]"); } else if (currentRouting.primary() && currentRouting.relocating() && replicationTracker.isRelocated() && (newRouting.relocating() == false || newRouting.equalsIgnoringMetaData(currentRouting) == false)) { - // if the shard is not in primary mode anymore (after primary relocation) we have to fail when any changes in shard routing occur (e.g. due to recovery - // failure / cancellation). The reason is that at the moment we cannot safely reactivate primary mode without risking two - // active primaries. - throw new IndexShardRelocatedException(shardId(), "Shard is marked as relocated, cannot safely move to state " + newRouting.state()); + // if the shard is not in primary mode anymore (after primary relocation) we have to fail when any changes in shard + // routing occur (e.g. due to recovery failure / cancellation). The reason is that at the moment we cannot safely + // reactivate primary mode without risking two active primaries. + throw new IndexShardRelocatedException(shardId(), "Shard is marked as relocated, cannot safely move to state " + + newRouting.state()); } assert newRouting.active() == false || state == IndexShardState.STARTED || state == IndexShardState.CLOSED : "routing is active, but local shard state isn't. 
routing: " + newRouting + ", local state: " + state; @@ -494,8 +499,8 @@ public void updateShardState(final ShardRouting newRouting, "primary terms can only go up; current term [" + pendingPrimaryTerm + "], new term [" + newPrimaryTerm + "]"; /* * Before this call returns, we are guaranteed that all future operations are delayed and so this happens before we - * increment the primary term. The latch is needed to ensure that we do not unblock operations before the primary term is - * incremented. + * increment the primary term. The latch is needed to ensure that we do not unblock operations before the primary + * term is incremented. */ // to prevent primary relocation handoff while resync is not completed boolean resyncStarted = primaryReplicaResyncInProgress.compareAndSet(false, true); @@ -549,13 +554,15 @@ public void updateShardState(final ShardRouting newRouting, public void onResponse(ResyncTask resyncTask) { logger.info("primary-replica resync completed with {} operations", resyncTask.getResyncedOperations()); - boolean resyncCompleted = primaryReplicaResyncInProgress.compareAndSet(true, false); + boolean resyncCompleted = + primaryReplicaResyncInProgress.compareAndSet(true, false); assert resyncCompleted : "primary-replica resync finished but was not started"; } @Override public void onFailure(Exception e) { - boolean resyncCompleted = primaryReplicaResyncInProgress.compareAndSet(true, false); + boolean resyncCompleted = + primaryReplicaResyncInProgress.compareAndSet(true, false); assert resyncCompleted : "primary-replica resync finished but was not started"; if (state == IndexShardState.CLOSED) { // ignore, shutting down @@ -621,7 +628,8 @@ public IndexShardState markAsRecovering(String reason, RecoveryState recoverySta * @throws IllegalIndexShardStateException if the shard is not relocating due to concurrent cancellation * @throws InterruptedException if blocking operations is interrupted */ - public void relocated(final Consumer consumer) throws IllegalIndexShardStateException, InterruptedException { + public void relocated(final Consumer consumer) + throws IllegalIndexShardStateException, InterruptedException { assert shardRouting.primary() : "only primaries can be marked as relocated: " + shardRouting; try { indexShardOperationPermits.blockOperations(30, TimeUnit.MINUTES, () -> { @@ -1235,7 +1243,8 @@ public void close(String reason, boolean flushEngine) throws IOException { } } - public IndexShard postRecovery(String reason) throws IndexShardStartedException, IndexShardRelocatedException, IndexShardClosedException { + public IndexShard postRecovery(String reason) + throws IndexShardStartedException, IndexShardRelocatedException, IndexShardClosedException { synchronized (mutex) { if (state == IndexShardState.CLOSED) { throw new IndexShardClosedException(shardId); @@ -1508,7 +1517,8 @@ public boolean ignoreRecoveryAttempt() { public void readAllowed() throws IllegalIndexShardStateException { IndexShardState state = this.state; // one time volatile read if (readAllowedStates.contains(state) == false) { - throw new IllegalIndexShardStateException(shardId, state, "operations only allowed when shard state is one of " + readAllowedStates.toString()); + throw new IllegalIndexShardStateException(shardId, state, "operations only allowed when shard state is one of " + + readAllowedStates.toString()); } } @@ -1522,7 +1532,8 @@ private void ensureWriteAllowed(Engine.Operation.Origin origin) throws IllegalIn if (origin.isRecovery()) { if (state != IndexShardState.RECOVERING) { - throw new 
IllegalIndexShardStateException(shardId, state, "operation only allowed when recovering, origin [" + origin + "]"); + throw new IllegalIndexShardStateException(shardId, state, + "operation only allowed when recovering, origin [" + origin + "]"); } } else { if (origin == Engine.Operation.Origin.PRIMARY) { @@ -1534,13 +1545,15 @@ private void ensureWriteAllowed(Engine.Operation.Origin origin) throws IllegalIn assert getActiveOperationsCount() == 0 : "Ongoing writes [" + getActiveOperations() + "]"; } if (writeAllowedStates.contains(state) == false) { - throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard state is one of " + writeAllowedStates + ", origin [" + origin + "]"); + throw new IllegalIndexShardStateException(shardId, state, "operation only allowed when shard state is one of " + + writeAllowedStates + ", origin [" + origin + "]"); } } } private boolean assertPrimaryMode() { - assert shardRouting.primary() && replicationTracker.isPrimaryMode() : "shard " + shardRouting + " is not a primary shard in primary mode"; + assert shardRouting.primary() && replicationTracker.isPrimaryMode() : "shard " + shardRouting + + " is not a primary shard in primary mode"; return true; } @@ -1617,9 +1630,11 @@ public ShardPath shardPath() { return path; } - public boolean recoverFromLocalShards(BiConsumer mappingUpdateConsumer, List localShards) throws IOException { + public boolean recoverFromLocalShards(BiConsumer mappingUpdateConsumer, + List localShards) throws IOException { assert shardRouting.primary() : "recover from local shards only makes sense if the shard is a primary shard"; - assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS : "invalid recovery type: " + recoveryState.getRecoverySource(); + assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS : "invalid recovery type: " + + recoveryState.getRecoverySource(); final List snapshots = new ArrayList<>(); try { for (IndexShard shard : localShards) { @@ -1647,7 +1662,8 @@ public boolean recoverFromStore() { public boolean restoreFromRepository(Repository repository) { assert shardRouting.primary() : "recover from store only makes sense if the shard is a primary shard"; - assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.SNAPSHOT : "invalid recovery type: " + recoveryState.getRecoverySource(); + assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.SNAPSHOT : "invalid recovery type: " + + recoveryState.getRecoverySource(); StoreRecovery storeRecovery = new StoreRecovery(shardId, logger); return storeRecovery.recoverFromRepository(this, repository); } @@ -1736,7 +1752,8 @@ public boolean hasCompleteHistoryOperations(String source, long startingSeqNo) t * if any operation between {@code fromSeqNo} and {@code toSeqNo} is missing. * This parameter should be only enabled when the entire requesting range is below the global checkpoint. 
*/ - public Translog.Snapshot newChangesSnapshot(String source, long fromSeqNo, long toSeqNo, boolean requiredFullRange) throws IOException { + public Translog.Snapshot newChangesSnapshot(String source, long fromSeqNo, + long toSeqNo, boolean requiredFullRange) throws IOException { return getEngine().newChangesSnapshot(source, mapperService, fromSeqNo, toSeqNo, requiredFullRange); } @@ -1995,7 +2012,8 @@ assert state() != IndexShardState.POST_RECOVERY && state() != IndexShardState.ST * @param primaryContext the sequence number context */ public void activateWithPrimaryContext(final ReplicationTracker.PrimaryContext primaryContext) { - assert shardRouting.primary() && shardRouting.isRelocationTarget() : "only primary relocation target can update allocation IDs from primary context: " + shardRouting; + assert shardRouting.primary() && shardRouting.isRelocationTarget() : + "only primary relocation target can update allocation IDs from primary context: " + shardRouting; assert primaryContext.getCheckpointStates().containsKey(routingEntry().allocationId().getId()) && getLocalCheckpoint() == primaryContext.getCheckpointStates().get(routingEntry().allocationId().getId()).getLocalCheckpoint(); synchronized (mutex) { @@ -2136,7 +2154,8 @@ public void startRecovery(RecoveryState recoveryState, PeerRecoveryTargetService recoveryListener.onRecoveryDone(recoveryState); } } catch (Exception e) { - recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(recoveryState, null, e), true); + recoveryListener.onRecoveryFailure(recoveryState, + new RecoveryFailedException(recoveryState, null, e), true); } }); break; @@ -2146,7 +2165,8 @@ public void startRecovery(RecoveryState recoveryState, PeerRecoveryTargetService recoveryTargetService.startRecovery(this, recoveryState.getSourceNode(), recoveryListener); } catch (Exception e) { failShard("corrupted preexisting index", e); - recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(recoveryState, null, e), true); + recoveryListener.onRecoveryFailure(recoveryState, + new RecoveryFailedException(recoveryState, null, e), true); } break; case SNAPSHOT: @@ -2159,7 +2179,8 @@ public void startRecovery(RecoveryState recoveryState, PeerRecoveryTargetService recoveryListener.onRecoveryDone(recoveryState); } } catch (Exception e) { - recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(recoveryState, null, e), true); + recoveryListener.onRecoveryFailure(recoveryState, + new RecoveryFailedException(recoveryState, null, e), true); } }); break; @@ -2453,7 +2474,8 @@ public void onFailure(final Exception e) { } public int getActiveOperationsCount() { - return indexShardOperationPermits.getActiveOperationsCount(); // refCount is incremented on successful acquire and decremented on close + // refCount is incremented on successful acquire and decremented on close + return indexShardOperationPermits.getActiveOperationsCount(); } /** diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexingStats.java b/server/src/main/java/org/elasticsearch/index/shard/IndexingStats.java index fa658c3600ea4..c7b6a99d2c7e1 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexingStats.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexingStats.java @@ -49,7 +49,8 @@ public static class Stats implements Streamable, ToXContentFragment { Stats() {} - public Stats(long indexCount, long indexTimeInMillis, long indexCurrent, long indexFailedCount, long deleteCount, long deleteTimeInMillis, 
long deleteCurrent, long noopUpdateCount, boolean isThrottled, long throttleTimeInMillis) { + public Stats(long indexCount, long indexTimeInMillis, long indexCurrent, long indexFailedCount, long deleteCount, + long deleteTimeInMillis, long deleteCurrent, long noopUpdateCount, boolean isThrottled, long throttleTimeInMillis) { this.indexCount = indexCount; this.indexTimeInMillis = indexTimeInMillis; this.indexCurrent = indexCurrent; diff --git a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java index 683a5a79c36af..f3e631f8bf6e0 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java +++ b/server/src/main/java/org/elasticsearch/index/shard/PrimaryReplicaSyncer.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.index.shard; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; @@ -26,7 +27,6 @@ import org.elasticsearch.action.resync.ResyncReplicationResponse; import org.elasticsearch.action.resync.TransportResyncReplicationAction; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -52,7 +52,9 @@ import static java.util.Objects.requireNonNull; -public class PrimaryReplicaSyncer extends AbstractComponent { +public class PrimaryReplicaSyncer { + + private static final Logger logger = LogManager.getLogger(PrimaryReplicaSyncer.class); private final TaskManager taskManager; private final SyncAction syncAction; diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardPath.java b/server/src/main/java/org/elasticsearch/index/shard/ShardPath.java index 1490d9f9a00e6..e4fa11bc1f4f7 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/ShardPath.java +++ b/server/src/main/java/org/elasticsearch/index/shard/ShardPath.java @@ -45,10 +45,14 @@ public final class ShardPath { private final boolean isCustomDataPath; public ShardPath(boolean isCustomDataPath, Path dataPath, Path shardStatePath, ShardId shardId) { - assert dataPath.getFileName().toString().equals(Integer.toString(shardId.id())) : "dataPath must end with the shard ID but didn't: " + dataPath.toString(); - assert shardStatePath.getFileName().toString().equals(Integer.toString(shardId.id())) : "shardStatePath must end with the shard ID but didn't: " + dataPath.toString(); - assert dataPath.getParent().getFileName().toString().equals(shardId.getIndex().getUUID()) : "dataPath must end with index path id but didn't: " + dataPath.toString(); - assert shardStatePath.getParent().getFileName().toString().equals(shardId.getIndex().getUUID()) : "shardStatePath must end with index path id but didn't: " + dataPath.toString(); + assert dataPath.getFileName().toString().equals(Integer.toString(shardId.id())) : + "dataPath must end with the shard ID but didn't: " + dataPath.toString(); + assert shardStatePath.getFileName().toString().equals(Integer.toString(shardId.id())) : + "shardStatePath must end with the shard ID but didn't: " + dataPath.toString(); + assert dataPath.getParent().getFileName().toString().equals(shardId.getIndex().getUUID()) : + "dataPath must end with index path id but didn't: " + dataPath.toString(); + assert 
shardStatePath.getParent().getFileName().toString().equals(shardId.getIndex().getUUID()) : + "shardStatePath must end with index path id but didn't: " + dataPath.toString(); if (isCustomDataPath && dataPath.equals(shardStatePath)) { throw new IllegalArgumentException("shard state path must be different to the data path when using custom data paths"); } @@ -111,7 +115,8 @@ public boolean isCustomDataPath() { * directories with a valid shard state exist the one with the highest version will be used. * Note: this method resolves custom data locations for the shard. */ - public static ShardPath loadShardPath(Logger logger, NodeEnvironment env, ShardId shardId, IndexSettings indexSettings) throws IOException { + public static ShardPath loadShardPath(Logger logger, NodeEnvironment env, + ShardId shardId, IndexSettings indexSettings) throws IOException { final Path[] paths = env.availableShardPaths(shardId); final int nodeLockId = env.getNodeLockId(); final Path sharedDataPath = env.sharedDataPath(); @@ -165,7 +170,8 @@ public static ShardPath loadShardPath(Logger logger, ShardId shardId, IndexSetti * This method tries to delete left-over shards where the index name has been reused but the UUID is different * to allow the new shard to be allocated. */ - public static void deleteLeftoverShardDirectory(Logger logger, NodeEnvironment env, ShardLock lock, IndexSettings indexSettings) throws IOException { + public static void deleteLeftoverShardDirectory(Logger logger, NodeEnvironment env, + ShardLock lock, IndexSettings indexSettings) throws IOException { final String indexUUID = indexSettings.getUUID(); final Path[] paths = env.availableShardPaths(lock.getShardId()); for (Path path : paths) { @@ -233,7 +239,8 @@ public static ShardPath selectNewPathForShard(NodeEnvironment env, ShardId shard .filter((path) -> pathsToSpace.get(path).subtract(estShardSizeInBytes).compareTo(BigInteger.ZERO) > 0) // Sort by the number of shards for this index .sorted((p1, p2) -> { - int cmp = Long.compare(pathToShardCount.getOrDefault(p1, 0L), pathToShardCount.getOrDefault(p2, 0L)); + int cmp = Long.compare(pathToShardCount.getOrDefault(p1, 0L), + pathToShardCount.getOrDefault(p2, 0L)); if (cmp == 0) { // if the number of shards is equal, tie-break with the number of total shards cmp = Integer.compare(dataPathToShardCount.getOrDefault(p1.path, 0), diff --git a/server/src/main/java/org/elasticsearch/index/shard/ShardStateMetaData.java b/server/src/main/java/org/elasticsearch/index/shard/ShardStateMetaData.java index 3f3f2a78100af..5d4a093cc4ba2 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/ShardStateMetaData.java +++ b/server/src/main/java/org/elasticsearch/index/shard/ShardStateMetaData.java @@ -88,7 +88,8 @@ public String toString() { return "primary [" + primary + "], allocation [" + allocationId + "]"; } - public static final MetaDataStateFormat FORMAT = new MetaDataStateFormat(SHARD_STATE_FILE_PREFIX) { + public static final MetaDataStateFormat FORMAT = + new MetaDataStateFormat(SHARD_STATE_FILE_PREFIX) { @Override protected XContentBuilder newXContentBuilder(XContentType type, OutputStream stream) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index 77e99c44d179f..c57fc08166b47 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -98,7 +98,8 @@ boolean 
recoverFromStore(final IndexShard indexShard) { return false; } - boolean recoverFromLocalShards(BiConsumer mappingUpdateConsumer, final IndexShard indexShard, final List shards) throws IOException { + boolean recoverFromLocalShards(BiConsumer mappingUpdateConsumer, + final IndexShard indexShard, final List shards) throws IOException { if (canRecover(indexShard)) { RecoverySource.Type recoveryType = indexShard.recoveryState().getRecoverySource().getType(); assert recoveryType == RecoverySource.Type.LOCAL_SHARDS: "expected local shards recovery type: " + recoveryType; @@ -133,7 +134,8 @@ boolean recoverFromLocalShards(BiConsumer mappingUpdate internalRecoverFromStore(indexShard); // just trigger a merge to do housekeeping on the // copied segments - we will also see them in stats etc. - indexShard.getEngine().forceMerge(false, -1, false, false, false); + indexShard.getEngine().forceMerge(false, -1, false, + false, false); } catch (IOException ex) { throw new IndexShardRecoveryException(indexShard.shardId(), "failed to recover from local shards", ex); } @@ -304,7 +306,8 @@ private boolean executeRecovery(final IndexShard indexShard, Runnable recoveryRu // to call post recovery. final IndexShardState shardState = indexShard.state(); final RecoveryState recoveryState = indexShard.recoveryState(); - assert shardState != IndexShardState.CREATED && shardState != IndexShardState.RECOVERING : "recovery process of " + shardId + " didn't get to post_recovery. shardState [" + shardState + "]"; + assert shardState != IndexShardState.CREATED && shardState != IndexShardState.RECOVERING : + "recovery process of " + shardId + " didn't get to post_recovery. shardState [" + shardState + "]"; if (logger.isTraceEnabled()) { RecoveryState.Index index = recoveryState.getIndex(); @@ -316,11 +319,13 @@ private boolean executeRecovery(final IndexShard indexShard, Runnable recoveryRu .append(new ByteSizeValue(index.recoveredBytes())).append("]\n"); sb.append(" : reusing_files [").append(index.reusedFileCount()).append("] with total_size [") .append(new ByteSizeValue(index.reusedBytes())).append("]\n"); - sb.append(" verify_index : took [").append(TimeValue.timeValueMillis(recoveryState.getVerifyIndex().time())).append("], check_index [") - .append(timeValueMillis(recoveryState.getVerifyIndex().checkIndexTime())).append("]\n"); + sb.append(" verify_index : took [") + .append(TimeValue.timeValueMillis(recoveryState.getVerifyIndex().time())).append("], check_index [") + .append(timeValueMillis(recoveryState.getVerifyIndex().checkIndexTime())).append("]\n"); sb.append(" translog : number_of_operations [").append(recoveryState.getTranslog().recoveredOperations()) .append("], took [").append(TimeValue.timeValueMillis(recoveryState.getTranslog().time())).append("]"); - logger.trace("recovery completed from [shard_store], took [{}]\n{}", timeValueMillis(recoveryState.getTimer().time()), sb); + logger.trace("recovery completed from [shard_store], took [{}]\n{}", + timeValueMillis(recoveryState.getTimer().time()), sb); } else if (logger.isDebugEnabled()) { logger.debug("recovery completed from [shard_store], took [{}]", timeValueMillis(recoveryState.getTimer().time())); } @@ -371,7 +376,8 @@ private void internalRecoverFromStore(IndexShard indexShard) throws IndexShardRe files += " (failure=" + ExceptionsHelper.detailedMessage(inner) + ")"; } if (indexShouldExists) { - throw new IndexShardRecoveryException(shardId, "shard allocated for local recovery (post api), should exist, but doesn't, current files: " + files, e); + throw 
new IndexShardRecoveryException(shardId, + "shard allocated for local recovery (post api), should exist, but doesn't, current files: " + files, e); } } if (si != null) { @@ -462,7 +468,8 @@ private void restore(final IndexShard indexShard, final Repository repository, f snapshotShardId = new ShardId(indexName, IndexMetaData.INDEX_UUID_NA_VALUE, shardId.id()); } final IndexId indexId = repository.getRepositoryData().resolveIndexId(indexName); - repository.restoreShard(indexShard, restoreSource.snapshot().getSnapshotId(), restoreSource.version(), indexId, snapshotShardId, indexShard.recoveryState()); + repository.restoreShard(indexShard, restoreSource.snapshot().getSnapshotId(), + restoreSource.version(), indexId, snapshotShardId, indexShard.recoveryState()); final Store store = indexShard.store(); store.bootstrapNewHistory(); final SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo(); diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index 87b36e95778f3..4e06838d0cd4f 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -459,7 +459,8 @@ public static MetadataSnapshot readMetadataSnapshot(Path indexLocation, ShardId * can be successfully opened. This includes reading the segment infos and possible * corruption markers. */ - public static boolean canOpenIndex(Logger logger, Path indexLocation, ShardId shardId, NodeEnvironment.ShardLocker shardLocker) throws IOException { + public static boolean canOpenIndex(Logger logger, Path indexLocation, + ShardId shardId, NodeEnvironment.ShardLocker shardLocker) throws IOException { try { tryOpenIndex(indexLocation, shardId, shardLocker, logger); } catch (Exception ex) { @@ -474,7 +475,8 @@ public static boolean canOpenIndex(Logger logger, Path indexLocation, ShardId sh * segment infos and possible corruption markers. If the index can not * be opened, an exception is thrown */ - public static void tryOpenIndex(Path indexLocation, ShardId shardId, NodeEnvironment.ShardLocker shardLocker, Logger logger) throws IOException, ShardLockObtainFailedException { + public static void tryOpenIndex(Path indexLocation, ShardId shardId, NodeEnvironment.ShardLocker shardLocker, + Logger logger) throws IOException, ShardLockObtainFailedException { try (ShardLock lock = shardLocker.lock(shardId, TimeUnit.SECONDS.toMillis(5)); Directory dir = new SimpleFSDirectory(indexLocation)) { failIfCorrupted(dir, shardId); @@ -489,7 +491,8 @@ public static void tryOpenIndex(Path indexLocation, ShardId shardId, NodeEnviron * Note: Checksums are calculated by default since version 4.8.0. This method only adds the * verification against the checksum in the given metadata and does not add any significant overhead. 
*/ - public IndexOutput createVerifyingOutput(String fileName, final StoreFileMetaData metadata, final IOContext context) throws IOException { + public IndexOutput createVerifyingOutput(String fileName, final StoreFileMetaData metadata, + final IOContext context) throws IOException { IndexOutput output = directory().createOutput(fileName, context); boolean success = false; try { @@ -537,7 +540,8 @@ public static boolean checkIntegrityNoException(StoreFileMetaData md, Directory public static void checkIntegrity(final StoreFileMetaData md, final Directory directory) throws IOException { try (IndexInput input = directory.openInput(md.name(), IOContext.READONCE)) { if (input.length() != md.length()) { // first check the length no matter how old this file is - throw new CorruptIndexException("expected length=" + md.length() + " != actual length: " + input.length() + " : file truncated?", input); + throw new CorruptIndexException("expected length=" + md.length() + " != actual length: " + input.length() + + " : file truncated?", input); } // throw exception if the file is corrupt String checksum = Store.digestToString(CodecUtil.checksumEntireFile(input)); @@ -650,7 +654,9 @@ public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetaData) thr try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) { for (String existingFile : directory.listAll()) { if (Store.isAutogenerated(existingFile) || sourceMetaData.contains(existingFile)) { - continue; // don't delete snapshot file, or the checksums file (note, this is extra protection since the Store won't delete checksum) + // don't delete snapshot file, or the checksums file (note, this is extra protection since the Store won't delete + // checksum) + continue; } try { directory.deleteFile(reason, existingFile); @@ -660,7 +666,8 @@ public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetaData) thr || existingFile.equals(IndexFileNames.OLD_SEGMENTS_GEN) || existingFile.startsWith(CORRUPTED)) { // TODO do we need to also fail this if we can't delete the pending commit file? - // if one of those files can't be deleted we better fail the cleanup otherwise we might leave an old commit point around? + // if one of those files can't be deleted we better fail the cleanup otherwise we might leave an old commit + // point around? throw new IllegalStateException("Can't delete " + existingFile + " - cleanup failed", ex); } logger.debug(() -> new ParameterizedMessage("failed to delete file [{}]", existingFile), ex); @@ -685,13 +692,14 @@ final void verifyAfterCleanup(MetadataSnapshot sourceMetaData, MetadataSnapshot StoreFileMetaData remote = sourceMetaData.get(meta.name()); // if we have different files then they must have no checksums; otherwise something went wrong during recovery. // we have that problem when we have an empty index is only a segments_1 file so we can't tell if it's a Lucene 4.8 file - // and therefore no checksum is included. That isn't a problem since we simply copy it over anyway but those files come out as - // different in the diff. That's why we have to double check here again if the rest of it matches. + // and therefore no checksum is included. That isn't a problem since we simply copy it over anyway but those files + // come out as different in the diff. That's why we have to double check here again if the rest of it matches. 
// all is fine this file is just part of a commit or a segment that is different if (local.isSame(remote) == false) { logger.debug("Files are different on the recovery target: {} ", recoveryDiff); - throw new IllegalStateException("local version: " + local + " is different from remote version after recovery: " + remote, null); + throw new IllegalStateException("local version: " + local + " is different from remote version after recovery: " + + remote, null); } } } else { @@ -851,7 +859,8 @@ static LoadedMetadata loadMetadata(IndexCommit commit, Directory directory, Logg final SegmentInfos segmentCommitInfos = Store.readSegmentsInfo(commit, directory); numDocs = Lucene.getNumDocs(segmentCommitInfos); commitUserDataBuilder.putAll(segmentCommitInfos.getUserData()); - Version maxVersion = segmentCommitInfos.getMinSegmentLuceneVersion(); // we don't know which version was used to write so we take the max version. + // we don't know which version was used to write so we take the max version. + Version maxVersion = segmentCommitInfos.getMinSegmentLuceneVersion(); for (SegmentCommitInfo info : segmentCommitInfos) { final Version version = info.info.getVersion(); if (version == null) { @@ -862,7 +871,8 @@ static LoadedMetadata loadMetadata(IndexCommit commit, Directory directory, Logg maxVersion = version; } for (String file : info.files()) { - checksumFromLuceneFile(directory, file, builder, logger, version, SEGMENT_INFO_EXTENSION.equals(IndexFileNames.getExtension(file))); + checksumFromLuceneFile(directory, file, builder, logger, version, + SEGMENT_INFO_EXTENSION.equals(IndexFileNames.getExtension(file))); } } if (maxVersion == null) { @@ -878,7 +888,9 @@ static LoadedMetadata loadMetadata(IndexCommit commit, Directory directory, Logg // Lucene checks the checksum after it tries to lookup the codec etc. // in that case we might get only IAE or similar exceptions while we are really corrupt... // TODO we should check the checksum in lucene if we hit an exception - logger.warn(() -> new ParameterizedMessage("failed to build store metadata. checking segment info integrity (with commit [{}])", commit == null ? "no" : "yes"), ex); + logger.warn(() -> + new ParameterizedMessage("failed to build store metadata. checking segment info integrity " + + "(with commit [{}])", commit == null ? "no" : "yes"), ex); Lucene.checkSegmentInfoIntegrity(directory); } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException cex) { cex.addSuppressed(ex); @@ -902,10 +914,12 @@ private static void checksumFromLuceneFile(Directory directory, String file, Map length = in.length(); if (length < CodecUtil.footerLength()) { // truncated files trigger IAE if we seek negative... these files are really corrupted though - throw new CorruptIndexException("Can't retrieve checksum from file: " + file + " file length must be >= " + CodecUtil.footerLength() + " but was: " + in.length(), in); + throw new CorruptIndexException("Can't retrieve checksum from file: " + file + " file length must be >= " + + CodecUtil.footerLength() + " but was: " + in.length(), in); } if (readFileAsHash) { - final VerifyingIndexInput verifyingIndexInput = new VerifyingIndexInput(in); // additional safety we checksum the entire file we read the hash for... + // additional safety we checksum the entire file we read the hash for... 
+ final VerifyingIndexInput verifyingIndexInput = new VerifyingIndexInput(in); hashFile(fileHash, new InputStreamIndexInput(verifyingIndexInput, length), length); checksum = digestToString(verifyingIndexInput.verify()); } else { @@ -964,19 +978,21 @@ public Map asMap() { *

     * <ul>
     * <li>all files in this segment have the same checksum</li>
     * <li>all files in this segment have the same length</li>
-     * <li>the segments {@code .si} files hashes are byte-identical Note: This is a using a perfect hash function, The metadata transfers the {@code .si} file content as it's hash</li>
+     * <li>the segments {@code .si} files hashes are byte-identical Note: This is a using a perfect hash function,
+     * The metadata transfers the {@code .si} file content as it's hash</li>
     * </ul>
     * <p>
     * The {@code .si} file contains a lot of diagnostics including a timestamp etc. in the future there might be
     * unique segment identifiers in there hardening this method further.
     * <p>
-     * The per-commit files handles very similar. A commit is composed of the {@code segments_N} files as well as generational files like
-     * deletes ({@code _x_y.del}) or field-info ({@code _x_y.fnm}) files. On a per-commit level files for a commit are treated
+     * The per-commit files handles very similar. A commit is composed of the {@code segments_N} files as well as generational files
+     * like deletes ({@code _x_y.del}) or field-info ({@code _x_y.fnm}) files. On a per-commit level files for a commit are treated
     * as identical iff:
     * <ul>
     * <li>all files belonging to this commit have the same checksum</li>
     * <li>all files belonging to this commit have the same length</li>
-     * <li>the segments file {@code segments_N} files hashes are byte-identical Note: This is a using a perfect hash function, The metadata transfers the {@code segments_N} file content as it's hash</li>
+     * <li>the segments file {@code segments_N} files hashes are byte-identical Note: This is a using a perfect hash function,
+     * The metadata transfers the {@code segments_N} file content as it's hash</li>
     * </ul>
     * <p>
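As a rough illustration of the grouping these criteria apply to (per-segment files versus per-commit files such as segments_N and generational delete/live-docs files), the sketch below is a simplified, hypothetical example; it is not the actual recoveryDiff implementation.

import org.apache.lucene.index.IndexFileNames;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

final class RecoveryGroupingSketch {

    // Hypothetical grouping: segments_N and generational files (.del/.liv) form the per-commit group,
    // every other file is grouped under the segment it belongs to. Identity is then decided per group.
    static Map<String, List<String>> groupBySegmentOrCommit(List<String> fileNames) {
        Map<String, List<String>> groups = new HashMap<>();
        for (String file : fileNames) {
            String segment = IndexFileNames.parseSegmentName(file);
            String extension = IndexFileNames.getExtension(file);
            boolean perCommit = IndexFileNames.SEGMENTS.equals(segment)
                || "del".equals(extension) || "liv".equals(extension);
            groups.computeIfAbsent(perCommit ? "per-commit" : segment, k -> new ArrayList<>()).add(file);
        }
        return groups;
    }
}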
* NOTE: this diff will not contain the {@code segments.gen} file. This file is omitted on recovery. @@ -994,7 +1010,8 @@ public RecoveryDiff recoveryDiff(MetadataSnapshot recoveryTargetSnapshot) { } final String segmentId = IndexFileNames.parseSegmentName(meta.name()); final String extension = IndexFileNames.getExtension(meta.name()); - if (IndexFileNames.SEGMENTS.equals(segmentId) || DEL_FILE_EXTENSION.equals(extension) || LIV_FILE_EXTENSION.equals(extension)) { + if (IndexFileNames.SEGMENTS.equals(segmentId) || + DEL_FILE_EXTENSION.equals(extension) || LIV_FILE_EXTENSION.equals(extension)) { // only treat del files as per-commit files fnm files are generational but only for upgradable DV perCommitStoreFiles.add(meta); } else { @@ -1029,9 +1046,11 @@ public RecoveryDiff recoveryDiff(MetadataSnapshot recoveryTargetSnapshot) { different.addAll(identicalFiles); } } - RecoveryDiff recoveryDiff = new RecoveryDiff(Collections.unmodifiableList(identical), Collections.unmodifiableList(different), Collections.unmodifiableList(missing)); + RecoveryDiff recoveryDiff = new RecoveryDiff(Collections.unmodifiableList(identical), + Collections.unmodifiableList(different), Collections.unmodifiableList(missing)); assert recoveryDiff.size() == this.metadata.size() - (metadata.containsKey(IndexFileNames.OLD_SEGMENTS_GEN) ? 1 : 0) - : "some files are missing recoveryDiff size: [" + recoveryDiff.size() + "] metadata size: [" + this.metadata.size() + "] contains segments.gen: [" + metadata.containsKey(IndexFileNames.OLD_SEGMENTS_GEN) + "]"; + : "some files are missing recoveryDiff size: [" + recoveryDiff.size() + "] metadata size: [" + + this.metadata.size() + "] contains segments.gen: [" + metadata.containsKey(IndexFileNames.OLD_SEGMENTS_GEN) + "]"; return recoveryDiff; } @@ -1184,8 +1203,8 @@ public void verify() throws IOException { } } throw new CorruptIndexException("verification failed (hardware problem?) 
: expected=" + metadata.checksum() + - " actual=" + actualChecksum + " footer=" + footerDigest +" writtenLength=" + writtenBytes + " expectedLength=" + metadata.length() + - " (resource=" + metadata.toString() + ")", "VerifyingIndexOutput(" + metadata.name() + ")"); + " actual=" + actualChecksum + " footer=" + footerDigest +" writtenLength=" + writtenBytes + " expectedLength=" + + metadata.length() + " (resource=" + metadata.toString() + ")", "VerifyingIndexOutput(" + metadata.name() + ")"); } @Override @@ -1203,7 +1222,8 @@ public void writeByte(byte b) throws IOException { } } else { verify(); // fail if we write more than expected - throw new AssertionError("write past EOF expected length: " + metadata.length() + " writtenBytes: " + writtenBytes); + throw new AssertionError("write past EOF expected length: " + metadata.length() + + " writtenBytes: " + writtenBytes); } } out.writeByte(b); diff --git a/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java b/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java index 6d60e2ab70b5e..80cefd58beb5f 100644 --- a/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java +++ b/server/src/main/java/org/elasticsearch/index/termvectors/TermVectorsService.java @@ -79,7 +79,8 @@ public static TermVectorsResponse getTermVectors(IndexShard indexShard, TermVect static TermVectorsResponse getTermVectors(IndexShard indexShard, TermVectorsRequest request, LongSupplier nanoTimeSupplier) { final long startTime = nanoTimeSupplier.getAsLong(); - final TermVectorsResponse termVectorsResponse = new TermVectorsResponse(indexShard.shardId().getIndex().getName(), request.type(), request.id()); + final TermVectorsResponse termVectorsResponse = new TermVectorsResponse(indexShard.shardId().getIndex().getName(), + request.type(), request.id()); final Term uidTerm = indexShard.mapperService().createUidTerm(request.type(), request.id()); if (uidTerm == null) { termVectorsResponse.setExists(false); @@ -143,7 +144,8 @@ else if (docIdAndVersion != null) { } } // write term vectors - termVectorsResponse.setFields(termVectorsByField, request.selectedFields(), request.getFlags(), topLevelFields, dfs, termVectorsFilter); + termVectorsResponse.setFields(termVectorsByField, request.selectedFields(), + request.getFlags(), topLevelFields, dfs, termVectorsFilter); } termVectorsResponse.setTookInMillis(TimeUnit.NANOSECONDS.toMillis(nanoTimeSupplier.getAsLong() - startTime)); } catch (Exception ex) { @@ -176,7 +178,8 @@ private static boolean isValidField(MappedFieldType fieldType) { return true; } - private static Fields addGeneratedTermVectors(IndexShard indexShard, Engine.GetResult get, Fields termVectorsByField, TermVectorsRequest request, Set selectedFields) throws IOException { + private static Fields addGeneratedTermVectors(IndexShard indexShard, Engine.GetResult get, Fields termVectorsByField, + TermVectorsRequest request, Set selectedFields) throws IOException { /* only keep valid fields */ Set validFields = new HashSet<>(); for (String field : selectedFields) { @@ -201,7 +204,8 @@ private static Fields addGeneratedTermVectors(IndexShard indexShard, Engine.GetR getFields[getFields.length - 1] = SourceFieldMapper.NAME; GetResult getResult = indexShard.getService().get( get, request.id(), request.type(), getFields, null); - Fields generatedTermVectors = generateTermVectors(indexShard, getResult.sourceAsMap(), getResult.getFields().values(), request.offsets(), request.perFieldAnalyzer(), validFields); + 
Fields generatedTermVectors = generateTermVectors(indexShard, getResult.sourceAsMap(), getResult.getFields().values(), + request.offsets(), request.perFieldAnalyzer(), validFields); /* merge with existing Fields */ if (termVectorsByField == null) { @@ -241,7 +245,12 @@ private static Set getFieldsToGenerate(Map perAnalyzerFi return selectedFields; } - private static Fields generateTermVectors(IndexShard indexShard, Map source, Collection getFields, boolean withOffsets, @Nullable Map perFieldAnalyzer, Set fields) throws IOException { + private static Fields generateTermVectors(IndexShard indexShard, + Map source, + Collection getFields, + boolean withOffsets, + @Nullable Map perFieldAnalyzer, + Set fields) throws IOException { Map> values = new HashMap<>(); for (DocumentField getField : getFields) { String field = getField.getName(); @@ -303,8 +312,9 @@ private static Fields generateTermVectorsFromDoc(IndexShard indexShard, TermVect String[] values = doc.getValues(field.name()); documentFields.add(new DocumentField(field.name(), Arrays.asList((Object[]) values))); } - return generateTermVectors(indexShard, XContentHelper.convertToMap(parsedDocument.source(), true, request.xContentType()).v2(), - documentFields, request.offsets(), request.perFieldAnalyzer(), seenFields); + return generateTermVectors(indexShard, + XContentHelper.convertToMap(parsedDocument.source(), true, request.xContentType()).v2(), documentFields, + request.offsets(), request.perFieldAnalyzer(), seenFields); } private static ParsedDocument parseDocument(IndexShard indexShard, String index, String type, BytesReference doc, diff --git a/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java b/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java index 41c3252eab07a..c61718332cde3 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java +++ b/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java @@ -38,7 +38,8 @@ public abstract class BaseTranslogReader implements Comparable= 4 : "reusable buffer must have capacity >=4 when reading opSize. got [" + reusableBuffer.capacity() + "]"; + assert reusableBuffer.capacity() >= 4 : "reusable buffer must have capacity >=4 when reading opSize. got [" + + reusableBuffer.capacity() + "]"; reusableBuffer.clear(); reusableBuffer.limit(4); readBytes(reusableBuffer, position); @@ -94,7 +96,8 @@ public TranslogSnapshot newSnapshot() { * reads an operation at the given position and returns it. The buffer length is equal to the number * of bytes reads. */ - protected final BufferedChecksumStreamInput checksummedStream(ByteBuffer reusableBuffer, long position, int opSize, BufferedChecksumStreamInput reuse) throws IOException { + protected final BufferedChecksumStreamInput checksummedStream(ByteBuffer reusableBuffer, long position, int opSize, + BufferedChecksumStreamInput reuse) throws IOException { final ByteBuffer buffer; if (reusableBuffer.capacity() >= opSize) { buffer = reusableBuffer; diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java index 233531d2c8f65..c1b4fb5f05b04 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -77,8 +77,9 @@ * In Elasticsearch there is one Translog instance per {@link org.elasticsearch.index.engine.InternalEngine}. 
The engine * records the current translog generation {@link Translog#getGeneration()} in it's commit metadata using {@link #TRANSLOG_GENERATION_KEY} * to reference the generation that contains all operations that have not yet successfully been committed to the engines lucene index. - * Additionally, since Elasticsearch 2.0 the engine also records a {@link #TRANSLOG_UUID_KEY} with each commit to ensure a strong association - * between the lucene index an the transaction log file. This UUID is used to prevent accidental recovery from a transaction log that belongs to a + * Additionally, since Elasticsearch 2.0 the engine also records a {@link #TRANSLOG_UUID_KEY} with each commit to ensure a strong + * association between the lucene index an the transaction log file. This UUID is used to prevent accidental recovery from a transaction + * log that belongs to a * different engine. *
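For readers who have not seen that association before: the generation and UUID live in the commit user data of the Lucene index, under the TRANSLOG_GENERATION_KEY and TRANSLOG_UUID_KEY constants defined further down in this file. A minimal, hypothetical way to read them back out:

import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.store.Directory;

import java.io.IOException;
import java.util.Map;

final class TranslogCommitDataSketch {

    // Hypothetical helper: the engine writes these keys into each Lucene commit so that, on recovery,
    // the matching translog generation can be found and translogs from a different engine can be rejected.
    static void printTranslogAssociation(Directory directory) throws IOException {
        Map<String, String> userData = SegmentInfos.readLatestCommit(directory).getUserData();
        String generation = userData.get("translog_generation"); // Translog.TRANSLOG_GENERATION_KEY
        String uuid = userData.get("translog_uuid");             // Translog.TRANSLOG_UUID_KEY
        System.out.println("translog generation=" + generation + ", uuid=" + uuid);
    }
}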

* Each Translog has only one translog file open for writes at any time referenced by a translog generation ID. This ID is written to a @@ -98,10 +99,13 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC /* * TODO - * - we might need something like a deletion policy to hold on to more than one translog eventually (I think sequence IDs needs this) but we can refactor as we go - * - use a simple BufferedOutputStream to write stuff and fold BufferedTranslogWriter into it's super class... the tricky bit is we need to be able to do random access reads even from the buffer + * - we might need something like a deletion policy to hold on to more than one translog eventually (I think sequence IDs needs this) + * but we can refactor as we go + * - use a simple BufferedOutputStream to write stuff and fold BufferedTranslogWriter into it's super class... the tricky bit is we + * need to be able to do random access reads even from the buffer * - we need random exception on the FileSystem API tests for all this. - * - we need to page align the last write before we sync, we can take advantage of ensureSynced for this since we might have already fsynced far enough + * - we need to page align the last write before we sync, we can take advantage of ensureSynced for this since we might have already + * fsynced far enough */ public static final String TRANSLOG_GENERATION_KEY = "translog_generation"; public static final String TRANSLOG_UUID_KEY = "translog_uuid"; @@ -172,12 +176,15 @@ public Translog( // we hit this before and then blindly deleted the new generation even though we managed to bake it in and then hit this: // https://discuss.elastic.co/t/cannot-recover-index-because-of-missing-tanslog-files/38336 as an example // - // For this to happen we must have already copied the translog.ckp file into translog-gen.ckp so we first check if that file exists - // if not we don't even try to clean it up and wait until we fail creating it - assert Files.exists(nextTranslogFile) == false || Files.size(nextTranslogFile) <= TranslogHeader.headerSizeInBytes(translogUUID) : "unexpected translog file: [" + nextTranslogFile + "]"; + // For this to happen we must have already copied the translog.ckp file into translog-gen.ckp so we first check if that + // file exists. If not we don't even try to clean it up and wait until we fail creating it + assert Files.exists(nextTranslogFile) == false || + Files.size(nextTranslogFile) <= TranslogHeader.headerSizeInBytes(translogUUID) : + "unexpected translog file: [" + nextTranslogFile + "]"; if (Files.exists(currentCheckpointFile) // current checkpoint is already copied && Files.deleteIfExists(nextTranslogFile)) { // delete it and log a warning - logger.warn("deleted previously created, but not yet committed, next generation [{}]. This can happen due to a tragic exception when creating a new generation", nextTranslogFile.getFileName()); + logger.warn("deleted previously created, but not yet committed, next generation [{}]. 
This can happen due to a" + + " tragic exception when creating a new generation", nextTranslogFile.getFileName()); } this.readers.addAll(recoverFromFiles(checkpoint)); if (readers.isEmpty()) { @@ -208,7 +215,8 @@ public Translog( private ArrayList recoverFromFiles(Checkpoint checkpoint) throws IOException { boolean success = false; ArrayList foundTranslogs = new ArrayList<>(); - final Path tempFile = Files.createTempFile(location, TRANSLOG_FILE_PREFIX, TRANSLOG_FILE_SUFFIX); // a temp file to copy checkpoint to - note it must be in on the same FS otherwise atomic move won't work + // a temp file to copy checkpoint to - note it must be in on the same FS otherwise atomic move won't work + final Path tempFile = Files.createTempFile(location, TRANSLOG_FILE_PREFIX, TRANSLOG_FILE_SUFFIX); boolean tempFileRenamed = false; try (ReleasableLock lock = writeLock.acquire()) { logger.debug("open uncommitted translog checkpoint {}", checkpoint); @@ -234,7 +242,8 @@ private ArrayList recoverFromFiles(Checkpoint checkpoint) throws throw new IllegalStateException("translog file doesn't exist with generation: " + i + " recovering from: " + minGenerationToRecoverFrom + " checkpoint: " + checkpoint.generation + " - translog ids must be consecutive"); } - final TranslogReader reader = openReader(committedTranslogFile, Checkpoint.read(location.resolve(getCommitCheckpointFileName(i)))); + final TranslogReader reader = openReader(committedTranslogFile, + Checkpoint.read(location.resolve(getCommitCheckpointFileName(i)))); assert reader.getPrimaryTerm() <= primaryTermSupplier.getAsLong() : "Primary terms go backwards; current term [" + primaryTermSupplier.getAsLong() + "]" + "translog path [ " + committedTranslogFile + ", existing term [" + reader.getPrimaryTerm() + "]"; @@ -252,7 +261,8 @@ private ArrayList recoverFromFiles(Checkpoint checkpoint) throws if (Files.exists(commitCheckpoint)) { Checkpoint checkpointFromDisk = Checkpoint.read(commitCheckpoint); if (checkpoint.equals(checkpointFromDisk) == false) { - throw new IllegalStateException("Checkpoint file " + commitCheckpoint.getFileName() + " already exists but has corrupted content expected: " + checkpoint + " but got: " + checkpointFromDisk); + throw new IllegalStateException("Checkpoint file " + commitCheckpoint.getFileName() + + " already exists but has corrupted content expected: " + checkpoint + " but got: " + checkpointFromDisk); } } else { // we first copy this into the temp-file and then fsync it followed by an atomic move into the target file @@ -283,7 +293,8 @@ private ArrayList recoverFromFiles(Checkpoint checkpoint) throws TranslogReader openReader(Path path, Checkpoint checkpoint) throws IOException { FileChannel channel = FileChannel.open(path, StandardOpenOption.READ); try { - assert Translog.parseIdFromFileName(path) == checkpoint.generation : "expected generation: " + Translog.parseIdFromFileName(path) + " but got: " + checkpoint.generation; + assert Translog.parseIdFromFileName(path) == checkpoint.generation : "expected generation: " + + Translog.parseIdFromFileName(path) + " but got: " + checkpoint.generation; TranslogReader reader = TranslogReader.open(channel, path, checkpoint, translogUUID); channel = null; return reader; @@ -304,7 +315,8 @@ public static long parseIdFromFileName(Path translogFile) { try { return Long.parseLong(matcher.group(1)); } catch (NumberFormatException e) { - throw new IllegalStateException("number formatting issue in a file that passed PARSE_STRICT_ID_PATTERN: " + fileName + "]", e); + throw new 
IllegalStateException("number formatting issue in a file that passed PARSE_STRICT_ID_PATTERN: " + + fileName + "]", e); } } throw new IllegalArgumentException("can't parse id from file: " + fileName); @@ -837,7 +849,8 @@ public TranslogStats stats() { // acquire lock to make the two numbers roughly consistent (no file change half way) try (ReleasableLock lock = readLock.acquire()) { final long uncommittedGen = deletionPolicy.getTranslogGenerationOfLastCommit(); - return new TranslogStats(totalOperations(), sizeInBytes(), totalOperationsByMinGen(uncommittedGen), sizeInBytesByMinGen(uncommittedGen), earliestLastModifiedAge()); + return new TranslogStats(totalOperations(), sizeInBytes(), totalOperationsByMinGen(uncommittedGen), + sizeInBytesByMinGen(uncommittedGen), earliestLastModifiedAge()); } } @@ -1160,8 +1173,8 @@ public Index(String type, String id, long seqNo, long primaryTerm, byte[] source this(type, id, seqNo, primaryTerm, Versions.MATCH_ANY, VersionType.INTERNAL, source, null, null, -1); } - public Index(String type, String id, long seqNo, long primaryTerm, long version, VersionType versionType, byte[] source, String routing, - String parent, long autoGeneratedIdTimestamp) { + public Index(String type, String id, long seqNo, long primaryTerm, long version, VersionType versionType, + byte[] source, String routing, String parent, long autoGeneratedIdTimestamp) { this.type = type; this.id = id; this.source = new BytesArray(source); @@ -1349,7 +1362,8 @@ private Delete(final StreamInput in) throws IOException { } public Delete(Engine.Delete delete, Engine.DeleteResult deleteResult) { - this(delete.type(), delete.id(), delete.uid(), deleteResult.getSeqNo(), delete.primaryTerm(), deleteResult.getVersion(), delete.versionType()); + this(delete.type(), delete.id(), delete.uid(), deleteResult.getSeqNo(), delete.primaryTerm(), + deleteResult.getVersion(), delete.versionType()); } /** utility for testing */ @@ -1793,7 +1807,8 @@ public boolean isCurrent(TranslogGeneration generation) { try (ReleasableLock lock = writeLock.acquire()) { if (generation != null) { if (generation.translogUUID.equals(translogUUID) == false) { - throw new IllegalArgumentException("commit belongs to a different translog: " + generation.translogUUID + " vs. " + translogUUID); + throw new IllegalArgumentException("commit belongs to a different translog: " + + generation.translogUUID + " vs. 
" + translogUUID); } return generation.translogFileGeneration == currentFileGeneration(); } @@ -1910,12 +1925,14 @@ static String createEmptyTranslog(Path location, long initialGlobalCheckpoint, S ChannelFactory channelFactory, long primaryTerm) throws IOException { IOUtils.rm(location); Files.createDirectories(location); - final Checkpoint checkpoint = Checkpoint.emptyTranslogCheckpoint(0, 1, initialGlobalCheckpoint, 1); + final Checkpoint checkpoint = + Checkpoint.emptyTranslogCheckpoint(0, 1, initialGlobalCheckpoint, 1); final Path checkpointFile = location.resolve(CHECKPOINT_FILE_NAME); Checkpoint.write(channelFactory, checkpointFile, checkpoint, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW); IOUtils.fsync(checkpointFile, false); final String translogUUID = UUIDs.randomBase64UUID(); - TranslogWriter writer = TranslogWriter.create(shardId, translogUUID, 1, location.resolve(getFilename(1)), channelFactory, + TranslogWriter writer = TranslogWriter.create(shardId, translogUUID, 1, + location.resolve(getFilename(1)), channelFactory, new ByteSizeValue(10), 1, initialGlobalCheckpoint, () -> { throw new UnsupportedOperationException(); }, () -> { throw new UnsupportedOperationException(); }, primaryTerm, new TragicExceptionHolder()); diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogReader.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogReader.java index 4091fa45762e1..a75f49972a5d2 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogReader.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogReader.java @@ -129,7 +129,8 @@ protected void readBytes(ByteBuffer buffer, long position) throws IOException { throw new EOFException("read requested past EOF. pos [" + position + "] end: [" + length + "]"); } if (position < getFirstOperationOffset()) { - throw new IOException("read requested before position of first ops. pos [" + position + "] first op on: [" + getFirstOperationOffset() + "]"); + throw new IOException("read requested before position of first ops. pos [" + position + "] first op on: [" + + getFirstOperationOffset() + "]"); } Channels.readFromFileChannelWithEofException(channel, position, buffer); } diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java index 8fe92bba0097c..bff3e4eb2f540 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java @@ -94,10 +94,12 @@ public long sizeInBytes() { */ protected void readBytes(ByteBuffer buffer, long position) throws IOException { if (position >= length) { - throw new EOFException("read requested past EOF. pos [" + position + "] end: [" + length + "], generation: [" + getGeneration() + "], path: [" + path + "]"); + throw new EOFException("read requested past EOF. pos [" + position + "] end: [" + length + "], generation: [" + + getGeneration() + "], path: [" + path + "]"); } if (position < getFirstOperationOffset()) { - throw new IOException("read requested before position of first ops. pos [" + position + "] first op on: [" + getFirstOperationOffset() + "], generation: [" + getGeneration() + "], path: [" + path + "]"); + throw new IOException("read requested before position of first ops. 
pos [" + position + "] first op on: [" + + getFirstOperationOffset() + "], generation: [" + getGeneration() + "], path: [" + path + "]"); } Channels.readFromFileChannelWithEofException(channel, position, buffer); } diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java index 48ee79ac08502..40089315dcb86 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java @@ -128,7 +128,8 @@ public static TranslogWriter create(ShardId shardId, String translogUUID, long f writerGlobalCheckpointSupplier, minTranslogGenerationSupplier, header, tragedy); } catch (Exception exception) { // if we fail to bake the file-generation into the checkpoint we stick with the file and once we recover and that - // file exists we remove it. We only apply this logic to the checkpoint.generation+1 any other file with a higher generation is an error condition + // file exists we remove it. We only apply this logic to the checkpoint.generation+1 any other file with a higher generation + // is an error condition IOUtils.closeWhileHandlingException(channel); throw exception; } diff --git a/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java b/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java index 95281468a5306..e358bc57798b4 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java +++ b/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java @@ -19,9 +19,10 @@ package org.elasticsearch.indices; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -48,7 +49,9 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantLock; -public class IndexingMemoryController extends AbstractComponent implements IndexingOperationListener, Closeable { +public class IndexingMemoryController implements IndexingOperationListener, Closeable { + + private static final Logger logger = LogManager.getLogger(IndexingMemoryController.class); /** How much heap (% or bytes) we will share across all actively indexing shards on this node (default: 10%). 
*/ public static final Setting INDEX_BUFFER_SIZE_SETTING = diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java b/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java index 129b839bac75a..1dfbc3af42b49 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java @@ -19,6 +19,8 @@ package org.elasticsearch.indices; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.Term; import org.apache.lucene.search.BulkScorer; @@ -30,7 +32,6 @@ import org.apache.lucene.search.Scorer; import org.apache.lucene.search.ScorerSupplier; import org.apache.lucene.search.Weight; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.lucene.ShardCoreKeyMap; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -48,7 +49,9 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.function.Predicate; -public class IndicesQueryCache extends AbstractComponent implements QueryCache, Closeable { +public class IndicesQueryCache implements QueryCache, Closeable { + + private static final Logger logger = LogManager.getLogger(IndicesQueryCache.class); public static final Setting INDICES_CACHE_QUERY_SIZE_SETTING = Setting.memorySizeSetting("indices.queries.cache.size", "10%", Property.NodeScope); diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java b/server/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java index be7601e645861..5939fca227b92 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesRequestCache.java @@ -21,6 +21,8 @@ import com.carrotsearch.hppc.ObjectHashSet; import com.carrotsearch.hppc.ObjectSet; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.util.Accountable; @@ -31,7 +33,6 @@ import org.elasticsearch.common.cache.CacheLoader; import org.elasticsearch.common.cache.RemovalListener; import org.elasticsearch.common.cache.RemovalNotification; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -62,8 +63,9 @@ * There are still several TODOs left in this class, some easily addressable, some more complex, but the support * is functional. */ -public final class IndicesRequestCache extends AbstractComponent implements RemovalListener, Closeable { +public final class IndicesRequestCache implements RemovalListener, Closeable { + + private static final Logger logger = LogManager.getLogger(IndicesRequestCache.class); /** * A setting to enable or disable request caching on an index level. 
Its dynamic by default @@ -296,7 +298,7 @@ synchronized void cleanCache() { CleanupKey cleanupKey = iterator.next(); iterator.remove(); if (cleanupKey.readerCacheKey == null || cleanupKey.entity.isOpen() == false) { - // -1 indicates full cleanup, as does a closed shard + // null indicates full cleanup, as does a closed shard currentFullClean.add(cleanupKey.entity.getCacheIdentity()); } else { currentKeysToClean.add(cleanupKey); diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index ee3da86553bf0..54e8bb22bb852 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -19,6 +19,7 @@ package org.elasticsearch.indices; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.DirectoryReader; @@ -154,6 +155,7 @@ public class IndicesService extends AbstractLifecycleComponent implements IndicesClusterStateService.AllocatedIndices, IndexService.ShardStoreDeleter { + private static final Logger logger = LogManager.getLogger(IndicesService.class); public static final String INDICES_SHARDS_CLOSED_TIMEOUT = "indices.shards_closed_timeout"; public static final Setting INDICES_CACHE_CLEAN_INTERVAL_SETTING = diff --git a/server/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java b/server/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java index 8c2eb9b67b8df..08232a5ef62e1 100644 --- a/server/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java +++ b/server/src/main/java/org/elasticsearch/indices/analysis/HunspellService.java @@ -18,13 +18,14 @@ */ package org.elasticsearch.indices.analysis; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.analysis.hunspell.Dictionary; import org.apache.lucene.store.Directory; import org.apache.lucene.store.SimpleFSDirectory; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -74,7 +75,9 @@ * * @see org.elasticsearch.index.analysis.HunspellTokenFilterFactory */ -public class HunspellService extends AbstractComponent { +public class HunspellService { + + private static final Logger logger = LogManager.getLogger(HunspellService.class); public static final Setting HUNSPELL_LAZY_LOAD = Setting.boolSetting("indices.analysis.hunspell.dictionary.lazy", Boolean.FALSE, Property.NodeScope); diff --git a/server/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerService.java b/server/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerService.java index 93e5ad74ad83a..cc6b7d5ef85bc 100644 --- a/server/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerService.java +++ b/server/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerService.java @@ -19,6 +19,8 @@ package org.elasticsearch.indices.breaker; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.breaker.CircuitBreaker; import 
org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.settings.Settings; @@ -28,6 +30,7 @@ * that load field data. */ public abstract class CircuitBreakerService extends AbstractLifecycleComponent { + private static final Logger logger = LogManager.getLogger(CircuitBreakerService.class); protected CircuitBreakerService(Settings settings) { super(settings); diff --git a/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java b/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java index 8f776f4cf12da..784168fba9e30 100644 --- a/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java +++ b/server/src/main/java/org/elasticsearch/indices/breaker/HierarchyCircuitBreakerService.java @@ -20,6 +20,7 @@ package org.elasticsearch.indices.breaker; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.breaker.ChildMemoryCircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; @@ -42,6 +43,7 @@ * if tripped */ public class HierarchyCircuitBreakerService extends CircuitBreakerService { + private static final Logger logger = LogManager.getLogger(HierarchyCircuitBreakerService.class); private static final String CHILD_LOGGER_PREFIX = "org.elasticsearch.indices.breaker."; diff --git a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index cd32c415ea54b..701690ed1d4f0 100644 --- a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -19,6 +19,7 @@ package org.elasticsearch.indices.cluster; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.LockObtainFailedException; @@ -96,6 +97,7 @@ import static org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.NO_LONGER_ASSIGNED; public class IndicesClusterStateService extends AbstractLifecycleComponent implements ClusterStateApplier { + private static final Logger logger = LogManager.getLogger(IndicesClusterStateService.class); final AllocatedIndices> indicesService; private final ClusterService clusterService; diff --git a/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java b/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java index a5945187de154..9d26c0fad0184 100644 --- a/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java +++ b/server/src/main/java/org/elasticsearch/indices/fielddata/cache/IndicesFieldDataCache.java @@ -20,6 +20,8 @@ package org.elasticsearch.indices.fielddata.cache; import java.util.Collections; + +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; @@ -31,7 +33,6 @@ import org.elasticsearch.common.cache.CacheBuilder; import org.elasticsearch.common.cache.RemovalListener; import org.elasticsearch.common.cache.RemovalNotification; -import org.elasticsearch.common.component.AbstractComponent; import 
org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.settings.Setting; @@ -50,7 +51,9 @@ import java.util.List; import java.util.function.ToLongBiFunction; -public class IndicesFieldDataCache extends AbstractComponent implements RemovalListener, Releasable{ +public class IndicesFieldDataCache implements RemovalListener, Releasable{ + + private static final Logger logger = LogManager.getLogger(IndicesFieldDataCache.class); public static final Setting INDICES_FIELDDATA_CACHE_SIZE_KEY = Setting.memorySizeSetting("indices.fielddata.cache.size", new ByteSizeValue(-1), Property.NodeScope); diff --git a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index 8cfbdabe2d11a..4a420b8c0ccae 100644 --- a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.indices.flush; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; @@ -36,7 +38,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -70,7 +71,9 @@ import java.util.Map; import java.util.concurrent.ConcurrentMap; -public class SyncedFlushService extends AbstractComponent implements IndexEventListener { +public class SyncedFlushService implements IndexEventListener { + + private static final Logger logger = LogManager.getLogger(SyncedFlushService.class); private static final String PRE_SYNCED_FLUSH_ACTION_NAME = "internal:indices/flush/synced/pre"; private static final String SYNCED_FLUSH_ACTION_NAME = "internal:indices/flush/synced/sync"; diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java index d709bf7807839..83f8649777d6f 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoverySourceService.java @@ -19,10 +19,11 @@ package org.elasticsearch.indices.recovery; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; @@ -47,7 +48,9 @@ * The source recovery accepts recovery requests from other peer shards and start the recovery process from this * source shard to the target shard. 
*/ -public class PeerRecoverySourceService extends AbstractComponent implements IndexEventListener { +public class PeerRecoverySourceService implements IndexEventListener { + + private static final Logger logger = LogManager.getLogger(PeerRecoverySourceService.class); public static class Actions { public static final String START_RECOVERY = "internal:index/shard/recovery/start_recovery"; diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index 60e327df03164..899f19ce656f6 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -19,6 +19,7 @@ package org.elasticsearch.indices.recovery; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.DirectoryReader; @@ -34,7 +35,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; @@ -78,7 +78,9 @@ * Note, it can be safely assumed that there will only be a single recovery per shard (index+id) and * not several of them (since we don't allocate several shard replicas to the same node). */ -public class PeerRecoveryTargetService extends AbstractComponent implements IndexEventListener { +public class PeerRecoveryTargetService implements IndexEventListener { + + private static final Logger logger = LogManager.getLogger(PeerRecoveryTargetService.class); public static class Actions { public static final String FILES_INFO = "internal:index/shard/recovery/filesInfo"; diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java index 3cdb371c6f36d..31ecd4455b165 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java @@ -19,9 +19,10 @@ package org.elasticsearch.indices.recovery; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.store.RateLimiter; import org.apache.lucene.store.RateLimiter.SimpleRateLimiter; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -30,7 +31,9 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; -public class RecoverySettings extends AbstractComponent { +public class RecoverySettings { + + private static final Logger logger = LogManager.getLogger(RecoverySettings.class); public static final Setting INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING = Setting.byteSizeSetting("indices.recovery.max_bytes_per_sec", new ByteSizeValue(40, ByteSizeUnit.MB), diff --git a/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java index 
dc5826b83ea8a..e86a8fe191aea 100644 --- a/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java +++ b/server/src/main/java/org/elasticsearch/indices/store/IndicesStore.java @@ -19,6 +19,8 @@ package org.elasticsearch.indices.store; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; @@ -34,7 +36,6 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -69,7 +70,9 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; -public class IndicesStore extends AbstractComponent implements ClusterStateListener, Closeable { +public class IndicesStore implements ClusterStateListener, Closeable { + + private static final Logger logger = LogManager.getLogger(IndicesStore.class); // TODO this class can be foled into either IndicesService and partially into IndicesClusterStateService // there is no need for a separate public service diff --git a/server/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java b/server/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java index 16ab96be06786..69ca66f169534 100644 --- a/server/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java +++ b/server/src/main/java/org/elasticsearch/monitor/fs/FsProbe.java @@ -19,6 +19,8 @@ package org.elasticsearch.monitor.fs; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.util.Constants; import org.elasticsearch.cluster.ClusterInfo; @@ -26,7 +28,6 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.NodeEnvironment.NodePath; @@ -40,7 +41,9 @@ import java.util.Map; import java.util.Set; -public class FsProbe extends AbstractComponent { +public class FsProbe { + + private static final Logger logger = LogManager.getLogger(FsProbe.class); private final NodeEnvironment nodeEnv; diff --git a/server/src/main/java/org/elasticsearch/monitor/fs/FsService.java b/server/src/main/java/org/elasticsearch/monitor/fs/FsService.java index 66058c9f79c30..348779ff521b5 100644 --- a/server/src/main/java/org/elasticsearch/monitor/fs/FsService.java +++ b/server/src/main/java/org/elasticsearch/monitor/fs/FsService.java @@ -19,10 +19,10 @@ package org.elasticsearch.monitor.fs; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -33,7 +33,9 @@ import java.io.IOException; -public class FsService extends AbstractComponent { +public class FsService { + + 
private static final Logger logger = LogManager.getLogger(FsService.class); private final FsProbe probe; private final TimeValue refreshInterval; diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java index 2c7235ca89954..94c66c79d2ee7 100644 --- a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java @@ -19,6 +19,7 @@ package org.elasticsearch.monitor.jvm; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.settings.Setting; @@ -41,6 +42,7 @@ import static java.util.Collections.unmodifiableMap; public class JvmGcMonitorService extends AbstractLifecycleComponent { + private static final Logger logger = LogManager.getLogger(JvmGcMonitorService.class); private final ThreadPool threadPool; private final boolean enabled; diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmService.java b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmService.java index eb6bd6f2f56b4..29b879ffe931f 100644 --- a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmService.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmService.java @@ -19,13 +19,16 @@ package org.elasticsearch.monitor.jvm; -import org.elasticsearch.common.component.AbstractComponent; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -public class JvmService extends AbstractComponent { +public class JvmService { + + private static final Logger logger = LogManager.getLogger(JvmService.class); private final JvmInfo jvmInfo; diff --git a/server/src/main/java/org/elasticsearch/monitor/os/OsService.java b/server/src/main/java/org/elasticsearch/monitor/os/OsService.java index 3727b4dcd1860..57306ace25495 100644 --- a/server/src/main/java/org/elasticsearch/monitor/os/OsService.java +++ b/server/src/main/java/org/elasticsearch/monitor/os/OsService.java @@ -19,7 +19,8 @@ package org.elasticsearch.monitor.os; -import org.elasticsearch.common.component.AbstractComponent; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; @@ -29,7 +30,9 @@ import java.io.IOException; -public class OsService extends AbstractComponent { +public class OsService { + + private static final Logger logger = LogManager.getLogger(OsService.class); private final OsProbe probe; private final OsInfo info; diff --git a/server/src/main/java/org/elasticsearch/monitor/process/ProcessService.java b/server/src/main/java/org/elasticsearch/monitor/process/ProcessService.java index aba7993850af5..963d865638ca9 100644 --- a/server/src/main/java/org/elasticsearch/monitor/process/ProcessService.java +++ b/server/src/main/java/org/elasticsearch/monitor/process/ProcessService.java @@ -19,14 +19,17 @@ package org.elasticsearch.monitor.process; -import org.elasticsearch.common.component.AbstractComponent; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; 
import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.SingleObjectCache; -public final class ProcessService extends AbstractComponent { +public final class ProcessService { + + private static final Logger logger = LogManager.getLogger(ProcessService.class); private final ProcessProbe probe; private final ProcessInfo info; diff --git a/server/src/main/java/org/elasticsearch/node/NodeService.java b/server/src/main/java/org/elasticsearch/node/NodeService.java index fe9c3d59d6646..0567641b8a5d6 100644 --- a/server/src/main/java/org/elasticsearch/node/NodeService.java +++ b/server/src/main/java/org/elasticsearch/node/NodeService.java @@ -28,7 +28,6 @@ import org.elasticsearch.action.search.SearchTransportService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.discovery.Discovery; @@ -45,7 +44,7 @@ import java.io.Closeable; import java.io.IOException; -public class NodeService extends AbstractComponent implements Closeable { +public class NodeService implements Closeable { private final Settings settings; private final ThreadPool threadPool; private final MonitorService monitorService; diff --git a/server/src/main/java/org/elasticsearch/node/ResponseCollectorService.java b/server/src/main/java/org/elasticsearch/node/ResponseCollectorService.java index 8885728927b34..4f9ec9488b430 100644 --- a/server/src/main/java/org/elasticsearch/node/ResponseCollectorService.java +++ b/server/src/main/java/org/elasticsearch/node/ResponseCollectorService.java @@ -24,7 +24,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.ExponentiallyWeightedMovingAverage; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -42,7 +41,7 @@ * tasks executed on each node, making the EWMA of the values available to the * coordinating node. 
*/ -public final class ResponseCollectorService extends AbstractComponent implements ClusterStateListener { +public final class ResponseCollectorService implements ClusterStateListener { private static final double ALPHA = 0.3; diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java index dd4bd30b19f4a..a288f5f03d862 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java @@ -19,6 +19,8 @@ package org.elasticsearch.persistent; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; @@ -29,7 +31,6 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.Assignment; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; @@ -41,7 +42,9 @@ /** * Component that runs only on the master node and is responsible for assigning running tasks to nodes */ -public class PersistentTasksClusterService extends AbstractComponent implements ClusterStateListener { +public class PersistentTasksClusterService implements ClusterStateListener { + + private static final Logger logger = LogManager.getLogger(PersistentTasksClusterService.class); private final ClusterService clusterService; private final PersistentTasksExecutorRegistry registry; diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksExecutor.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksExecutor.java index a02efb6805729..ab674a79c4e52 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksExecutor.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksExecutor.java @@ -22,7 +22,6 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.Assignment; import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; import org.elasticsearch.tasks.TaskId; @@ -34,7 +33,7 @@ * An executor of tasks that can survive restart of requesting or executing node. * These tasks are using cluster state rather than only transport service to send requests and responses. 
*/ -public abstract class PersistentTasksExecutor extends AbstractComponent { +public abstract class PersistentTasksExecutor { private final String executor; private final String taskName; diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java index a90415b530b43..260fabc67cdca 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java @@ -18,13 +18,14 @@ */ package org.elasticsearch.persistent; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -48,7 +49,9 @@ * This component is responsible for coordination of execution of persistent tasks on individual nodes. It runs on all * non-transport client nodes in the cluster and monitors cluster state changes to detect started commands. */ -public class PersistentTasksNodeService extends AbstractComponent implements ClusterStateListener { +public class PersistentTasksNodeService implements ClusterStateListener { + + private static final Logger logger = LogManager.getLogger(PersistentTasksNodeService.class); private final Map runningTasks = new HashMap<>(); private final PersistentTasksService persistentTasksService; diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java index ac80d1888bcc7..145435badc091 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.persistent; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; @@ -30,7 +32,6 @@ import org.elasticsearch.cluster.ClusterStateObserver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.node.NodeClosedException; @@ -46,7 +47,9 @@ * to the master node so that the master can update the cluster state and can track of the states * of the persistent tasks. 
*/ -public class PersistentTasksService extends AbstractComponent { +public class PersistentTasksService { + + private static final Logger logger = LogManager.getLogger(PersistentTasksService.class); private static final String ACTION_ORIGIN_TRANSIENT_NAME = "action.origin"; private static final String PERSISTENT_TASK_ORIGIN = "persistent_tasks"; diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginsService.java b/server/src/main/java/org/elasticsearch/plugins/PluginsService.java index 5dfdeb09095e3..2ad9a0892b8f6 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginsService.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginsService.java @@ -33,7 +33,6 @@ import org.elasticsearch.bootstrap.JarHell; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.io.FileSystemUtils; @@ -70,7 +69,9 @@ import static org.elasticsearch.common.io.FileSystemUtils.isAccessibleDirectory; -public class PluginsService extends AbstractComponent { +public class PluginsService { + + private static final Logger logger = LogManager.getLogger(PluginsService.class); private final Settings settings; private final Path configPath; diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index e97f7acf168f7..295b64c554f6e 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -19,6 +19,8 @@ package org.elasticsearch.repositories; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; @@ -32,7 +34,6 @@ import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; @@ -53,7 +54,9 @@ /** * Service responsible for maintaining and providing access to snapshot repositories on nodes. 
*/ -public class RepositoriesService extends AbstractComponent implements ClusterStateApplier { +public class RepositoriesService implements ClusterStateApplier { + + private static final Logger logger = LogManager.getLogger(RepositoriesService.class); private final Map typesRegistry; diff --git a/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java b/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java index 5c14c5a5781be..a73604145e79e 100644 --- a/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java +++ b/server/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java @@ -21,12 +21,13 @@ import com.carrotsearch.hppc.ObjectContainer; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.repositories.RepositoriesService.VerifyResponse; @@ -45,7 +46,10 @@ import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.atomic.AtomicInteger; -public class VerifyNodeRepositoryAction extends AbstractComponent { +public class VerifyNodeRepositoryAction { + + private static final Logger logger = LogManager.getLogger(VerifyNodeRepositoryAction.class); + public static final String ACTION_NAME = "internal:admin/repository/verify"; private final TransportService transportService; diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 3e43868072cd7..40ddec4d89e26 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -19,6 +19,8 @@ package org.elasticsearch.repositories.blobstore; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexCommit; @@ -170,6 +172,7 @@ * */ public abstract class BlobStoreRepository extends AbstractLifecycleComponent implements Repository { + private static final Logger logger = LogManager.getLogger(BlobStoreRepository.class); protected final RepositoryMetaData metadata; diff --git a/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java b/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java index 7abddafac4ed7..3db5195097145 100644 --- a/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/fs/FsRepository.java @@ -19,6 +19,8 @@ package org.elasticsearch.repositories.fs; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; @@ -46,6 +48,7 @@ 
* */ public class FsRepository extends BlobStoreRepository { + private static final Logger logger = LogManager.getLogger(FsRepository.class); public static final String TYPE = "fs"; diff --git a/server/src/main/java/org/elasticsearch/rest/RestController.java b/server/src/main/java/org/elasticsearch/rest/RestController.java index 98dc631324c8f..f73e9b3cb6a8c 100644 --- a/server/src/main/java/org/elasticsearch/rest/RestController.java +++ b/server/src/main/java/org/elasticsearch/rest/RestController.java @@ -19,6 +19,8 @@ package org.elasticsearch.rest; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.client.node.NodeClient; @@ -26,7 +28,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.path.PathTrie; @@ -60,7 +61,9 @@ import static org.elasticsearch.rest.RestStatus.OK; import static org.elasticsearch.rest.BytesRestResponse.TEXT_CONTENT_TYPE; -public class RestController extends AbstractComponent implements HttpServerTransport.Dispatcher { +public class RestController implements HttpServerTransport.Dispatcher { + + private static final Logger logger = LogManager.getLogger(RestController.class); private final PathTrie handlers = new PathTrie<>(RestUtils.REST_DECODER); diff --git a/server/src/main/java/org/elasticsearch/script/ScriptService.java b/server/src/main/java/org/elasticsearch/script/ScriptService.java index 6838231146cb1..f33738bd2be49 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptService.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptService.java @@ -19,6 +19,8 @@ package org.elasticsearch.script; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; @@ -38,7 +40,6 @@ import org.elasticsearch.common.cache.RemovalListener; import org.elasticsearch.common.cache.RemovalNotification; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; @@ -57,7 +58,9 @@ import java.util.Set; import java.util.function.Function; -public class ScriptService extends AbstractComponent implements Closeable, ClusterStateApplier { +public class ScriptService implements Closeable, ClusterStateApplier { + + private static final Logger logger = LogManager.getLogger(ScriptService.class); static final String DISABLE_DYNAMIC_SCRIPTING_SETTING = "script.disable_dynamic"; diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index c09b979f6ff1b..7f1f3a6e484d8 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -19,6 +19,8 @@ package org.elasticsearch.search; +import 
org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.TopDocs; import org.elasticsearch.core.internal.io.IOUtils; @@ -120,6 +122,7 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes; public class SearchService extends AbstractLifecycleComponent implements IndexEventListener { + private static final Logger logger = LogManager.getLogger(SearchService.class); // we can have 5 minutes here, since we make sure to clean with search requests and when shard/index closes public static final Setting DEFAULT_KEEPALIVE_SETTING = diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/package-info.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/package-info.java new file mode 100644 index 0000000000000..1e3093bd36877 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/package-info.java @@ -0,0 +1,23 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/** + * Histogram module for different bucket specifications used in aggregation. + */ +package org.elasticsearch.search.aggregations.bucket.histogram; diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 7e1873c3f0a6e..34046c205afcb 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -22,6 +22,7 @@ import com.carrotsearch.hppc.IntSet; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.Version; @@ -56,7 +57,6 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.ImmutableOpenMap; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.regex.Regex; @@ -115,7 +115,9 @@ * which removes {@link RestoreInProgress} when all shards are completed. In case of * restore failure a normal recovery fail-over process kicks in. 
*/ -public class RestoreService extends AbstractComponent implements ClusterStateApplier { +public class RestoreService implements ClusterStateApplier { + + private static final Logger logger = LogManager.getLogger(RestoreService.class); private static final Set UNMODIFIABLE_SETTINGS = unmodifiableSet(newHashSet( SETTING_NUMBER_OF_SHARDS, diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index b0af54b71a191..9af5089beba47 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -20,6 +20,8 @@ package org.elasticsearch.snapshots; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ExceptionsHelper; @@ -95,6 +97,7 @@ * starting and stopping shard level snapshots */ public class SnapshotShardsService extends AbstractLifecycleComponent implements ClusterStateListener, IndexEventListener { + private static final Logger logger = LogManager.getLogger(SnapshotShardsService.class); public static final String UPDATE_SNAPSHOT_STATUS_ACTION_NAME_V6 = "internal:cluster/snapshot/update_snapshot"; public static final String UPDATE_SNAPSHOT_STATUS_ACTION_NAME = "internal:cluster/snapshot/update_snapshot_status"; diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index a51528eb7d09e..1f276e191846f 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -21,6 +21,8 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.ExceptionsHelper; @@ -102,6 +104,7 @@ * */ public class SnapshotsService extends AbstractLifecycleComponent implements ClusterStateApplier { + private static final Logger logger = LogManager.getLogger(SnapshotsService.class); private final ClusterService clusterService; diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskManager.java b/server/src/main/java/org/elasticsearch/tasks/TaskManager.java index 3d0c14130373e..762ff7045e2c5 100644 --- a/server/src/main/java/org/elasticsearch/tasks/TaskManager.java +++ b/server/src/main/java/org/elasticsearch/tasks/TaskManager.java @@ -19,6 +19,8 @@ package org.elasticsearch.tasks; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchTimeoutException; @@ -29,7 +31,6 @@ import org.elasticsearch.cluster.ClusterStateApplier; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ 
-55,7 +56,10 @@ /** * Task Manager service for keeping track of currently running tasks on the nodes */ -public class TaskManager extends AbstractComponent implements ClusterStateApplier { +public class TaskManager implements ClusterStateApplier { + + private static final Logger logger = LogManager.getLogger(TaskManager.class); + private static final TimeValue WAIT_FOR_COMPLETION_POLL = timeValueMillis(100); /** Rest headers that are copied to the task */ diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java b/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java index f500e9561d808..136a551ab7dea 100644 --- a/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java +++ b/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.tasks; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; @@ -35,7 +37,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; @@ -53,7 +54,9 @@ /** * Service that can store task results. */ -public class TaskResultsService extends AbstractComponent { +public class TaskResultsService { + + private static final Logger logger = LogManager.getLogger(TaskResultsService.class); public static final String TASK_INDEX = ".tasks"; diff --git a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java index b9ebdf94dacef..7654fdedb2b24 100644 --- a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java +++ b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -19,12 +19,13 @@ package org.elasticsearch.threadpool; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.util.Counter; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -60,7 +61,9 @@ import static java.util.Collections.unmodifiableMap; -public class ThreadPool extends AbstractComponent implements Scheduler, Closeable { +public class ThreadPool implements Scheduler, Closeable { + + private static final Logger logger = LogManager.getLogger(ThreadPool.class); public static class Names { public static final String SAME = "same"; diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java index a7750920cfade..f66ed4572661f 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.Strings; import 
org.elasticsearch.common.UUIDs; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.SettingUpgrader; @@ -54,7 +53,7 @@ /** * Base class for all services and components that need up-to-date information about the registered remote clusters */ -public abstract class RemoteClusterAware extends AbstractComponent { +public abstract class RemoteClusterAware { public static final Setting.AffixSetting> SEARCH_REMOTE_CLUSTERS_SEEDS = Setting.affixKeySetting( diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index 481cf8b27fe4a..69000ade292ea 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.transport; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.SetOnce; @@ -37,7 +39,6 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; @@ -86,7 +87,9 @@ * {@link RemoteClusterService#REMOTE_CONNECTIONS_PER_CLUSTER} until either all eligible nodes are exhausted or the maximum number of * connections per cluster has been reached. 
*/ -final class RemoteClusterConnection extends AbstractComponent implements TransportConnectionListener, Closeable { +final class RemoteClusterConnection implements TransportConnectionListener, Closeable { + + private static final Logger logger = LogManager.getLogger(RemoteClusterConnection.class); private final TransportService transportService; private final ConnectionManager connectionManager; diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index ad584056b8cc8..c7310241aeb8b 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -19,6 +19,8 @@ package org.elasticsearch.transport; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.OriginalIndices; @@ -66,6 +68,7 @@ * Basic service for accessing remote clusters via gateway nodes */ public final class RemoteClusterService extends RemoteClusterAware implements Closeable { + private static final Logger logger = LogManager.getLogger(RemoteClusterService.class); public static final Setting SEARCH_REMOTE_CONNECTIONS_PER_CLUSTER = Setting.intSetting("search.remote.connections_per_cluster", 3, 1, Setting.Property.NodeScope, Setting.Property.Deprecated); diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index 097efc109bc0b..a72251fae5a06 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -20,6 +20,8 @@ import com.carrotsearch.hppc.IntHashSet; import com.carrotsearch.hppc.IntSet; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; @@ -114,6 +116,7 @@ import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; public abstract class TcpTransport extends AbstractLifecycleComponent implements Transport { + private static final Logger logger = LogManager.getLogger(TcpTransport.class); public static final String TRANSPORT_WORKER_THREAD_NAME_PREFIX = "transport_worker"; diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index 6201d0b3f49d4..2c709bca38e51 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -19,6 +19,7 @@ package org.elasticsearch.transport; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.Version; @@ -80,6 +81,7 @@ import static org.elasticsearch.common.settings.Setting.timeSetting; public class TransportService extends AbstractLifecycleComponent implements TransportMessageListener, TransportConnectionListener { + private static final Logger logger = LogManager.getLogger(TransportService.class); public static final Setting CONNECTIONS_PER_NODE_RECOVERY = intSetting("transport.connections_per_node.recovery", 2, 1, 
Setting.Property.NodeScope); diff --git a/server/src/main/java/org/elasticsearch/watcher/ResourceWatcherService.java b/server/src/main/java/org/elasticsearch/watcher/ResourceWatcherService.java index 997f7e845e287..3edab68e1580f 100644 --- a/server/src/main/java/org/elasticsearch/watcher/ResourceWatcherService.java +++ b/server/src/main/java/org/elasticsearch/watcher/ResourceWatcherService.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.watcher; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Setting; @@ -41,6 +43,7 @@ * defaults to {@code 60s}. The service can be disabled by setting {@code resource.reload.enabled} to {@code false}. */ public class ResourceWatcherService extends AbstractLifecycleComponent { + private static final Logger logger = LogManager.getLogger(ResourceWatcherService.class); public enum Frequency { diff --git a/server/src/test/java/org/elasticsearch/action/RejectionActionIT.java b/server/src/test/java/org/elasticsearch/action/RejectionActionIT.java index 0aa84ad806998..be7b6637ebe2e 100644 --- a/server/src/test/java/org/elasticsearch/action/RejectionActionIT.java +++ b/server/src/test/java/org/elasticsearch/action/RejectionActionIT.java @@ -87,7 +87,8 @@ public void onFailure(Exception e) { if (response instanceof SearchResponse) { SearchResponse searchResponse = (SearchResponse) response; for (ShardSearchFailure failure : searchResponse.getShardFailures()) { - assertTrue("got unexpected reason..." + failure.reason(), failure.reason().toLowerCase(Locale.ENGLISH).contains("rejected")); + assertTrue("got unexpected reason..." + failure.reason(), + failure.reason().toLowerCase(Locale.ENGLISH).contains("rejected")); } } else { Exception t = (Exception) response; @@ -95,7 +96,8 @@ public void onFailure(Exception e) { if (unwrap instanceof SearchPhaseExecutionException) { SearchPhaseExecutionException e = (SearchPhaseExecutionException) unwrap; for (ShardSearchFailure failure : e.shardFailures()) { - assertTrue("got unexpected reason..." + failure.reason(), failure.reason().toLowerCase(Locale.ENGLISH).contains("rejected")); + assertTrue("got unexpected reason..."
+ failure.reason(), + failure.reason().toLowerCase(Locale.ENGLISH).contains("rejected")); } } else if ((unwrap instanceof EsRejectedExecutionException) == false) { throw new AssertionError("unexpected failure", (Throwable) response); diff --git a/server/src/test/java/org/elasticsearch/action/admin/HotThreadsIT.java b/server/src/test/java/org/elasticsearch/action/admin/HotThreadsIT.java index 9f5d40c6709f0..7c00705b2a28e 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/HotThreadsIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/HotThreadsIT.java @@ -120,7 +120,8 @@ public void onFailure(Exception e) { assertHitCount( client().prepareSearch() .setQuery(matchAllQuery()) - .setPostFilter(boolQuery().must(matchAllQuery()).mustNot(boolQuery().must(termQuery("field1", "value1")).must(termQuery("field1", "value2")))) + .setPostFilter(boolQuery().must(matchAllQuery()).mustNot(boolQuery() + .must(termQuery("field1", "value1")).must(termQuery("field1", "value2")))) .get(), 3L); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponsesTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponsesTests.java index 8c1438815250a..5d497cdaa5c5b 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponsesTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthResponsesTests.java @@ -70,7 +70,8 @@ public void testClusterHealth() throws IOException { int inFlight = randomIntBetween(0, 200); int delayedUnassigned = randomIntBetween(0, 200); TimeValue pendingTaskInQueueTime = TimeValue.timeValueMillis(randomIntBetween(1000, 100000)); - ClusterHealthResponse clusterHealth = new ClusterHealthResponse("bla", new String[] {MetaData.ALL}, clusterState, pendingTasks, inFlight, delayedUnassigned, pendingTaskInQueueTime); + ClusterHealthResponse clusterHealth = new ClusterHealthResponse("bla", + new String[] {MetaData.ALL}, clusterState, pendingTasks, inFlight, delayedUnassigned, pendingTaskInQueueTime); clusterHealth = maybeSerialize(clusterHealth); assertClusterHealth(clusterHealth); assertThat(clusterHealth.getNumberOfPendingTasks(), Matchers.equalTo(pendingTasks)); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java index 43d94f56e5af3..795203d8bc4b9 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksIT.java @@ -66,7 +66,8 @@ public void testVerifyRepositoryWithBlocks() { // This test checks that the Verify Repository operation is never blocked, even if the cluster is read only. try { setClusterReadOnly(true); - VerifyRepositoryResponse response = client().admin().cluster().prepareVerifyRepository("test-repo-blocks").execute().actionGet(); + VerifyRepositoryResponse response = client().admin().cluster() + .prepareVerifyRepository("test-repo-blocks").execute().actionGet(); assertThat(response.getNodes().size(), equalTo(cluster().numDataAndMasterNodes())); } finally { setClusterReadOnly(false); @@ -100,7 +101,8 @@ public void testGetRepositoryWithBlocks() { // This test checks that the Get Repository operation is never blocked, even if the cluster is read only.
try { setClusterReadOnly(true); - GetRepositoriesResponse response = client().admin().cluster().prepareGetRepositories("test-repo-blocks").execute().actionGet(); + GetRepositoriesResponse response = client().admin().cluster() + .prepareGetRepositories("test-repo-blocks").execute().actionGet(); assertThat(response.repositories(), hasSize(1)); } finally { setClusterReadOnly(false); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java index 5fcd369a8a433..a3db4eca12131 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/SettingsUpdaterTests.java @@ -57,7 +57,8 @@ public void testUpdateSetting() { .transientSettings(Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 3.5) .put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 4.5).build()); ClusterState build = builder.metaData(metaData).build(); - ClusterState clusterState = updater.updateSettings(build, Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 0.5).build(), + ClusterState clusterState = updater.updateSettings(build, Settings.builder() + .put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 0.5).build(), Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 0.4).build(), logger); assertNotSame(clusterState, build); assertEquals(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.get(clusterState.metaData().persistentSettings()), 0.4, 0.1); @@ -73,7 +74,8 @@ public void testUpdateSetting() { assertFalse(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.exists(clusterState.metaData().transientSettings())); clusterState = updater.updateSettings(clusterState, - Settings.EMPTY, Settings.builder().putNull("cluster.routing.*").put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 10.0).build(), logger); + Settings.EMPTY, Settings.builder().putNull("cluster.routing.*") + .put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 10.0).build(), logger); assertEquals(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.get(clusterState.metaData().persistentSettings()), 10.0, 0.1); assertFalse(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.exists(clusterState.metaData().persistentSettings())); @@ -99,8 +101,10 @@ public void testAllOrNothing() { ClusterState build = builder.metaData(metaData).build(); try { - updater.updateSettings(build, Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), "not a float").build(), - Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), "not a float").put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 1.0f).build(), logger); + updater.updateSettings(build, Settings.builder() + .put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), "not a float").build(), + Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), "not a float") + .put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 1.0f).build(), logger); fail("all or nothing"); } catch (IllegalArgumentException ex) { logger.info("", ex); @@ -125,8 +129,10 @@ public void testClusterBlock() { .put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 4.5).build()); ClusterState build 
= builder.metaData(metaData).build(); - ClusterState clusterState = updater.updateSettings(build, Settings.builder().put(MetaData.SETTING_READ_ONLY_SETTING.getKey(), true).build(), - Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 1.6).put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 1.0f).build(), logger); + ClusterState clusterState = updater.updateSettings(build, Settings.builder() + .put(MetaData.SETTING_READ_ONLY_SETTING.getKey(), true).build(), + Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 1.6) + .put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 1.0f).build(), logger); assertEquals(clusterState.blocks().global().size(), 1); assertEquals(clusterState.blocks().global().iterator().next(), MetaData.CLUSTER_READ_ONLY_BLOCK); @@ -135,8 +141,10 @@ public void testClusterBlock() { assertEquals(clusterState.blocks().global().size(), 0); - clusterState = updater.updateSettings(build, Settings.builder().put(MetaData.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.getKey(), true).build(), - Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 1.6).put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 1.0f).build(), logger); + clusterState = updater.updateSettings(build, Settings.builder() + .put(MetaData.SETTING_READ_ONLY_ALLOW_DELETE_SETTING.getKey(), true).build(), + Settings.builder().put(BalancedShardsAllocator.INDEX_BALANCE_FACTOR_SETTING.getKey(), 1.6) + .put(BalancedShardsAllocator.SHARD_BALANCE_FACTOR_SETTING.getKey(), 1.0f).build(), logger); assertEquals(clusterState.blocks().global().size(), 1); assertEquals(clusterState.blocks().global().iterator().next(), MetaData.CLUSTER_READ_ONLY_ALLOW_DELETE_BLOCK); clusterState = updater.updateSettings(build, Settings.EMPTY, diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java index dd875fbc4980a..f51c2b7b172c4 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksIT.java @@ -88,7 +88,8 @@ public void testCreateSnapshotWithBlocks() { logger.info("--> creating a snapshot is allowed when the cluster is read only"); try { setClusterReadOnly(true); - assertThat(client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-1").setWaitForCompletion(true).get().status(), equalTo(RestStatus.OK)); + assertThat(client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-1") + .setWaitForCompletion(true).get().status(), equalTo(RestStatus.OK)); } finally { setClusterReadOnly(false); } @@ -104,7 +105,8 @@ public void testCreateSnapshotWithIndexBlocks() { logger.info("--> creating a snapshot is not blocked when an index is read only"); try { enableIndexBlock(INDEX_NAME, SETTING_READ_ONLY); - assertThat(client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-1").setIndices(COMMON_INDEX_NAME_MASK).setWaitForCompletion(true).get().status(), equalTo(RestStatus.OK)); + assertThat(client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-1") + .setIndices(COMMON_INDEX_NAME_MASK).setWaitForCompletion(true).get().status(), equalTo(RestStatus.OK)); } finally { disableIndexBlock(INDEX_NAME, SETTING_READ_ONLY); } @@ -112,9 +114,11 @@ public void 
testCreateSnapshotWithIndexBlocks() { logger.info("--> creating a snapshot is blocked when an index is blocked for reads"); try { enableIndexBlock(INDEX_NAME, SETTING_BLOCKS_READ); - assertBlocked(client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-2").setIndices(COMMON_INDEX_NAME_MASK), IndexMetaData.INDEX_READ_BLOCK); + assertBlocked(client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-2") + .setIndices(COMMON_INDEX_NAME_MASK), IndexMetaData.INDEX_READ_BLOCK); logger.info("--> creating a snapshot is not blocked when a read-blocked index is not part of the snapshot"); - assertThat(client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-2").setIndices(OTHER_INDEX_NAME).setWaitForCompletion(true).get().status(), equalTo(RestStatus.OK)); + assertThat(client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-2") + .setIndices(OTHER_INDEX_NAME).setWaitForCompletion(true).get().status(), equalTo(RestStatus.OK)); } finally { disableIndexBlock(INDEX_NAME, SETTING_BLOCKS_READ); } @@ -137,7 +141,8 @@ public void testRestoreSnapshotWithBlocks() { logger.info("--> restoring a snapshot is blocked when the cluster is read only"); try { setClusterReadOnly(true); - assertBlocked(client().admin().cluster().prepareRestoreSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME), MetaData.CLUSTER_READ_ONLY_BLOCK); + assertBlocked(client().admin().cluster().prepareRestoreSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME), + MetaData.CLUSTER_READ_ONLY_BLOCK); } finally { setClusterReadOnly(false); } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java index b515829b72ac5..063b78fccad05 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java @@ -40,7 +40,8 @@ public void testSerialization() throws Exception { ClusterStateRequest clusterStateRequest = new ClusterStateRequest().routingTable(randomBoolean()).metaData(randomBoolean()) .nodes(randomBoolean()).blocks(randomBoolean()).indices("testindex", "testindex2").indicesOptions(indicesOptions); - Version testVersion = VersionUtils.randomVersionBetween(random(), Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT); + Version testVersion = VersionUtils.randomVersionBetween(random(), + Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT); BytesStreamOutput output = new BytesStreamOutput(); output.setVersion(testVersion); clusterStateRequest.writeTo(output); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java index 4bb6a5f3a8c41..bf77cdeebd067 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIT.java @@ -54,7 +54,8 @@ private void assertCounts(ClusterStatsNodes.Counts counts, int total, Map> nodePlugins() { + return Arrays.asList(IngestTestPlugin.class); + } + public void testBulkIndexCreatesMapping() throws Exception { String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/bulk-log.json"); BulkRequestBuilder bulkBuilder = client().prepareBulk(); @@ -80,4 +98,52 @@ public void
testBulkWithWriteIndexAndRouting() { assertFalse(bulkResponse.buildFailureMessage(), bulkResponse.hasFailures()); assertFalse(client().prepareGet("index3", "type", "id").setRouting("1").get().isExists()); } + + public void testBulkWithGlobalDefaults() throws Exception { + // all requests in the json are missing index and type parameters: "_index" : "test", "_type" : "type1", + String bulkAction = copyToStringFromClasspath("/org/elasticsearch/action/bulk/simple-bulk-missing-index-type.json"); + { + BulkRequestBuilder bulkBuilder = client().prepareBulk(); + bulkBuilder.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null, XContentType.JSON); + ActionRequestValidationException ex = expectThrows(ActionRequestValidationException.class, bulkBuilder::get); + + assertThat(ex.validationErrors(), containsInAnyOrder( + "index is missing", + "index is missing", + "index is missing", + "type is missing", + "type is missing", + "type is missing")); + } + + { + createSamplePipeline("pipeline"); + BulkRequestBuilder bulkBuilder = client().prepareBulk("test","type1") + .routing("routing") + .pipeline("pipeline"); + + bulkBuilder.add(bulkAction.getBytes(StandardCharsets.UTF_8), 0, bulkAction.length(), null, null, XContentType.JSON); + BulkResponse bulkItemResponses = bulkBuilder.get(); + assertFalse(bulkItemResponses.hasFailures()); + } + } + + private void createSamplePipeline(String pipelineId) throws IOException, ExecutionException, InterruptedException { + XContentBuilder pipeline = jsonBuilder() + .startObject() + .startArray("processors") + .startObject() + .startObject("test") + .endObject() + .endObject() + .endArray() + .endObject(); + + AcknowledgedResponse acknowledgedResponse = client().admin() + .cluster() + .putPipeline(new PutPipelineRequest(pipelineId, BytesReference.bytes(pipeline), XContentType.JSON)) + .get(); + + assertTrue(acknowledgedResponse.isAcknowledged()); + } } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java index 3fbfa381ad352..6a7d9bc02ec3e 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java @@ -72,19 +72,9 @@ public void testBulkProcessorFlushPreservesContext() throws InterruptedException try (ThreadContext.StoredContext ignore = threadPool.getThreadContext().stashContext()) { threadPool.getThreadContext().putHeader(headerKey, headerValue); threadPool.getThreadContext().putTransient(transientKey, transientValue); - bulkProcessor = new BulkProcessor(consumer, BackoffPolicy.noBackoff(), new BulkProcessor.Listener() { - @Override - public void beforeBulk(long executionId, BulkRequest request) { - } - - @Override - public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { - } - - @Override - public void afterBulk(long executionId, BulkRequest request, Throwable failure) { - } - }, 1, bulkSize, new ByteSizeValue(5, ByteSizeUnit.MB), flushInterval, threadPool, () -> {}); + bulkProcessor = new BulkProcessor(consumer, BackoffPolicy.noBackoff(), emptyListener(), + 1, bulkSize, new ByteSizeValue(5, ByteSizeUnit.MB), flushInterval, + threadPool, () -> {}, BulkRequest::new); } assertNull(threadPool.getThreadContext().getHeader(headerKey)); assertNull(threadPool.getThreadContext().getTransient(transientKey)); @@ -100,28 +90,32 @@ public void afterBulk(long executionId, BulkRequest request, Throwable 
failure) bulkProcessor.close(); } + public void testAwaitOnCloseCallsOnClose() throws Exception { final AtomicBoolean called = new AtomicBoolean(false); - BulkProcessor bulkProcessor = new BulkProcessor((request, listener) -> { - }, BackoffPolicy.noBackoff(), new BulkProcessor.Listener() { + BiConsumer> consumer = (request, listener) -> {}; + BulkProcessor bulkProcessor = new BulkProcessor(consumer, BackoffPolicy.noBackoff(), emptyListener(), + 0, 10, new ByteSizeValue(1000), null, + (delay, executor, command) -> null, () -> called.set(true), BulkRequest::new); + + assertFalse(called.get()); + bulkProcessor.awaitClose(100, TimeUnit.MILLISECONDS); + assertTrue(called.get()); + } + + private BulkProcessor.Listener emptyListener() { + return new BulkProcessor.Listener() { @Override public void beforeBulk(long executionId, BulkRequest request) { - } @Override public void afterBulk(long executionId, BulkRequest request, BulkResponse response) { - } @Override public void afterBulk(long executionId, BulkRequest request, Throwable failure) { - } - }, 0, 10, new ByteSizeValue(1000), null, (delay, executor, command) -> null, () -> called.set(true)); - - assertFalse(called.get()); - bulkProcessor.awaitClose(100, TimeUnit.MILLISECONDS); - assertTrue(called.get()); + }; } } diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestBuilderTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestBuilderTests.java index eb2f4b6904d90..135380b54675d 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestBuilderTests.java @@ -64,13 +64,16 @@ public void testQueryBuilderQueryToString() { public void testSearchSourceBuilderToString() { SearchRequestBuilder searchRequestBuilder = client.prepareSearch(); searchRequestBuilder.setSource(new SearchSourceBuilder().query(QueryBuilders.termQuery("field", "value"))); - assertThat(searchRequestBuilder.toString(), equalTo(new SearchSourceBuilder().query(QueryBuilders.termQuery("field", "value")).toString())); + assertThat(searchRequestBuilder.toString(), + equalTo(new SearchSourceBuilder().query(QueryBuilders.termQuery("field", "value")).toString())); } public void testThatToStringDoesntWipeRequestSource() { - SearchRequestBuilder searchRequestBuilder = client.prepareSearch().setSource(new SearchSourceBuilder().query(QueryBuilders.termQuery("field", "value"))); + SearchRequestBuilder searchRequestBuilder = client.prepareSearch() + .setSource(new SearchSourceBuilder().query(QueryBuilders.termQuery("field", "value"))); String preToString = searchRequestBuilder.request().toString(); - assertThat(searchRequestBuilder.toString(), equalTo(new SearchSourceBuilder().query(QueryBuilders.termQuery("field", "value")).toString())); + assertThat(searchRequestBuilder.toString(), + equalTo(new SearchSourceBuilder().query(QueryBuilders.termQuery("field", "value")).toString())); String postToString = searchRequestBuilder.request().toString(); assertThat(preToString, equalTo(postToString)); } diff --git a/server/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTestCase.java b/server/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTestCase.java index f9aad9ead9133..d25d4ba69ba54 100644 --- a/server/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTestCase.java +++ b/server/src/test/java/org/elasticsearch/action/termvectors/AbstractTermVectorsTestCase.java @@ -272,8 +272,9 
@@ protected TestConfig[] generateTestConfigs(int numberOfTests, TestDoc[] testDocs configs.add(config); } // always adds a test that fails - configs.add(new TestConfig(new TestDoc("doesnt_exist", new TestFieldSetting[]{}, new String[]{}).index("doesn't_exist").alias("doesn't_exist"), - new String[]{"doesnt_exist"}, true, true, true).expectedException(org.elasticsearch.index.IndexNotFoundException.class)); + configs.add(new TestConfig(new TestDoc("doesnt_exist", new TestFieldSetting[]{}, new String[]{}) + .index("doesn't_exist").alias("doesn't_exist"), + new String[]{"doesnt_exist"}, true, true, true).expectedException(org.elasticsearch.index.IndexNotFoundException.class)); refresh(); @@ -401,9 +402,10 @@ protected void validateResponse(TermVectorsResponse esResponse, Fields luceneFie } protected TermVectorsRequestBuilder getRequestForConfig(TestConfig config) { - return client().prepareTermVectors(randomBoolean() ? config.doc.index : config.doc.alias, config.doc.type, config.doc.id).setPayloads(config.requestPayloads) - .setOffsets(config.requestOffsets).setPositions(config.requestPositions).setFieldStatistics(true).setTermStatistics(true) - .setSelectedFields(config.selectedFields).setRealtime(false); + return client().prepareTermVectors(randomBoolean() ? config.doc.index : config.doc.alias, config.doc.type, config.doc.id) + .setPayloads(config.requestPayloads) + .setOffsets(config.requestOffsets).setPositions(config.requestPositions).setFieldStatistics(true).setTermStatistics(true) + .setSelectedFields(config.selectedFields).setRealtime(false); } protected Fields getTermVectorsFromLucene(DirectoryReader directoryReader, TestDoc doc) throws IOException { diff --git a/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java b/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java index b5a596401cbbc..a45012dc4b3de 100644 --- a/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java +++ b/server/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsIT.java @@ -765,7 +765,8 @@ private void checkAnalyzedFields(Fields fieldsObject, Set fieldNames, Ma // check overridden by keyword analyzer ... 
if (perFieldAnalyzer.containsKey(fieldName)) { TermsEnum iterator = terms.iterator(); - assertThat("Analyzer for " + fieldName + " should have been overridden!", iterator.next().utf8ToString(), equalTo("some text here")); + assertThat("Analyzer for " + fieldName + " should have been overridden!", + iterator.next().utf8ToString(), equalTo("some text here")); assertThat(iterator.next(), nullValue()); } validFields.add(fieldName); diff --git a/server/src/test/java/org/elasticsearch/action/termvectors/MultiTermVectorsIT.java b/server/src/test/java/org/elasticsearch/action/termvectors/MultiTermVectorsIT.java index 5ed4f3252d57c..baedd7061bd18 100644 --- a/server/src/test/java/org/elasticsearch/action/termvectors/MultiTermVectorsIT.java +++ b/server/src/test/java/org/elasticsearch/action/termvectors/MultiTermVectorsIT.java @@ -116,7 +116,8 @@ public void testMultiTermVectorsWithVersion() throws Exception { //Version from Lucene index refresh(); response = client().prepareMultiTermVectors() - .add(new TermVectorsRequest(indexOrAlias(), "type1", "1").selectedFields("field").version(Versions.MATCH_ANY).realtime(false)) + .add(new TermVectorsRequest(indexOrAlias(), "type1", "1") + .selectedFields("field").version(Versions.MATCH_ANY).realtime(false)) .add(new TermVectorsRequest(indexOrAlias(), "type1", "1").selectedFields("field").version(1).realtime(false)) .add(new TermVectorsRequest(indexOrAlias(), "type1", "1").selectedFields("field").version(2).realtime(false)) .get(); diff --git a/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java b/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java index 2018218cc5456..3bdb02ac56a7b 100644 --- a/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java +++ b/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java @@ -310,7 +310,8 @@ public void testFieldTypeToTermVectorString() throws Exception { } catch (MapperParsingException e) { exceptiontrown = true; } - assertThat("TypeParsers.parseTermVector should accept string with_positions_payloads but does not.", exceptiontrown, equalTo(false)); + assertThat("TypeParsers.parseTermVector should accept string with_positions_payloads but does not.", exceptiontrown, + equalTo(false)); } public void testTermVectorStringGenerationWithoutPositions() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java index 989dc22ee0a73..b00cacd013fd8 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java @@ -25,9 +25,11 @@ import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand; import org.elasticsearch.cluster.routing.allocation.command.AllocateStalePrimaryAllocationCommand; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.ImmutableOpenIntMap; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; @@ -54,6 +56,7 @@ import java.util.HashSet; import 
java.util.List; import java.util.Set; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; @@ -212,14 +215,26 @@ public void testForceStaleReplicaToBePromotedToPrimary() throws Exception { rerouteBuilder.add(new AllocateEmptyPrimaryAllocationCommand(idxName, shardId, storeStatus.getNode().getId(), true)); } } - rerouteBuilder.get(); - - ClusterState state = client().admin().cluster().prepareState().get().getState(); - Set expectedAllocationIds = useStaleReplica + final Set expectedAllocationIds = useStaleReplica ? Collections.singleton(RecoverySource.ExistingStoreRecoverySource.FORCED_ALLOCATION_ID) : Collections.emptySet(); - assertEquals(expectedAllocationIds, state.metaData().index(idxName).inSyncAllocationIds(0)); + + final CountDownLatch clusterStateChangeLatch = new CountDownLatch(1); + final ClusterStateListener clusterStateListener = event -> { + final Set allocationIds = event.state().metaData().index(idxName).inSyncAllocationIds(0); + if (expectedAllocationIds.equals(allocationIds)) { + clusterStateChangeLatch.countDown(); + } + logger.info("expected allocation ids: {} actual allocation ids: {}", expectedAllocationIds, allocationIds); + }; + final ClusterService clusterService = internalCluster().getInstance(ClusterService.class, master); + clusterService.addListener(clusterStateListener); + + rerouteBuilder.get(); + + assertTrue(clusterStateChangeLatch.await(30, TimeUnit.SECONDS)); + clusterService.removeListener(clusterStateListener); logger.info("--> check that the stale primary shard gets allocated and that documents are available"); ensureYellow(idxName); @@ -235,7 +250,7 @@ public void testForceStaleReplicaToBePromotedToPrimary() throws Exception { assertHitCount(client().prepareSearch(idxName).setSize(0).setQuery(matchAllQuery()).get(), useStaleReplica ? 
1L : 0L); // allocation id of old primary was cleaned from the in-sync set - state = client().admin().cluster().prepareState().get().getState(); + final ClusterState state = client().admin().cluster().prepareState().get().getState(); assertEquals(Collections.singleton(state.routingTable().index(idxName).shard(0).primary.allocationId().getId()), state.metaData().index(idxName).inSyncAllocationIds(0)); diff --git a/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java index 4eca9f9d230a5..73da6ecd0184f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/ClusterApplierServiceTests.java @@ -119,23 +119,23 @@ public void testClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test1", - clusterApplierService.getClass().getCanonicalName(), + ClusterApplierService.class.getCanonicalName(), Level.DEBUG, "*processing [test1]: took [1s] no change in cluster state")); mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test2", - clusterApplierService.getClass().getCanonicalName(), + ClusterApplierService.class.getCanonicalName(), Level.TRACE, "*failed to execute cluster state applier in [2s]*")); mockAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( - "test3", - clusterApplierService.getClass().getCanonicalName(), - Level.DEBUG, - "*processing [test3]: took [0s] no change in cluster state*")); + new MockLogAppender.SeenEventExpectation( + "test3", + ClusterApplierService.class.getCanonicalName(), + Level.DEBUG, + "*processing [test3]: took [0s] no change in cluster state*")); - Logger clusterLogger = LogManager.getLogger("org.elasticsearch.cluster.service"); + Logger clusterLogger = LogManager.getLogger(ClusterApplierService.class); Loggers.addAppender(clusterLogger, mockAppender); try { clusterApplierService.currentTimeOverride = System.nanoTime(); @@ -190,23 +190,23 @@ public void testLongClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.UnseenEventExpectation( "test1 shouldn't see because setting is too low", - clusterApplierService.getClass().getCanonicalName(), + ClusterApplierService.class.getCanonicalName(), Level.WARN, "*cluster state applier task [test1] took [*] above the warn threshold of *")); mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test2", - clusterApplierService.getClass().getCanonicalName(), + ClusterApplierService.class.getCanonicalName(), Level.WARN, "*cluster state applier task [test2] took [32s] above the warn threshold of *")); mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test4", - clusterApplierService.getClass().getCanonicalName(), + ClusterApplierService.class.getCanonicalName(), Level.WARN, "*cluster state applier task [test3] took [34s] above the warn threshold of *")); - Logger clusterLogger = LogManager.getLogger("org.elasticsearch.cluster.service"); + Logger clusterLogger = LogManager.getLogger(ClusterApplierService.class); Loggers.addAppender(clusterLogger, mockAppender); try { final CountDownLatch latch = new CountDownLatch(4); diff --git a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java index 26e36afc551ee..b8b6b2dbde49d 100644 
--- a/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/MasterServiceTests.java @@ -310,23 +310,23 @@ public void testClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test1", - masterService.getClass().getCanonicalName(), + MasterService.class.getCanonicalName(), Level.DEBUG, "*processing [test1]: took [1s] no change in cluster state")); mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test2", - masterService.getClass().getCanonicalName(), + MasterService.class.getCanonicalName(), Level.TRACE, "*failed to execute cluster state update in [2s]*")); mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test3", - masterService.getClass().getCanonicalName(), + MasterService.class.getCanonicalName(), Level.DEBUG, "*processing [test3]: took [3s] done publishing updated cluster state (version: *, uuid: *)")); - Logger clusterLogger = LogManager.getLogger(masterService.getClass().getPackage().getName()); + Logger clusterLogger = LogManager.getLogger(MasterService.class); Loggers.addAppender(clusterLogger, mockAppender); try { final CountDownLatch latch = new CountDownLatch(4); @@ -651,29 +651,29 @@ public void testLongClusterStateUpdateLogging() throws Exception { mockAppender.addExpectation( new MockLogAppender.UnseenEventExpectation( "test1 shouldn't see because setting is too low", - masterService.getClass().getCanonicalName(), + MasterService.class.getCanonicalName(), Level.WARN, "*cluster state update task [test1] took [*] above the warn threshold of *")); mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test2", - masterService.getClass().getCanonicalName(), + MasterService.class.getCanonicalName(), Level.WARN, "*cluster state update task [test2] took [32s] above the warn threshold of *")); mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test3", - masterService.getClass().getCanonicalName(), + MasterService.class.getCanonicalName(), Level.WARN, "*cluster state update task [test3] took [33s] above the warn threshold of *")); mockAppender.addExpectation( new MockLogAppender.SeenEventExpectation( "test4", - masterService.getClass().getCanonicalName(), + MasterService.class.getCanonicalName(), Level.WARN, "*cluster state update task [test4] took [34s] above the warn threshold of *")); - Logger clusterLogger = LogManager.getLogger(masterService.getClass().getPackage().getName()); + Logger clusterLogger = LogManager.getLogger(MasterService.class); Loggers.addAppender(clusterLogger, mockAppender); try { final CountDownLatch latch = new CountDownLatch(5); diff --git a/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java b/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java index ed310ee305acf..1c0329a51e32a 100644 --- a/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/zen/UnicastZenPingTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.discovery.zen; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.Constants; import org.elasticsearch.Version; @@ -810,6 +811,8 @@ private static class NetworkHandle { private static class TestUnicastZenPing extends UnicastZenPing { + private static final Logger logger = LogManager.getLogger(TestUnicastZenPing.class); + 
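A note on the logger changes above and in the TestUnicastZenPing hunk just before this point: the following is an illustrative sketch only, not part of the change set, assuming Log4j2's LogManager and a top-level class (so the runtime logger name equals the class's canonical name). It shows why replacing the hard-coded "org.elasticsearch.cluster.service" string with the class literal keeps the MockLogAppender expectations pointed at the same logger the production class writes to, and keeps them correct if the class is ever moved or renamed. Imports assumed: org.apache.logging.log4j.{LogManager, Logger, Level}, org.elasticsearch.common.logging.Loggers, org.elasticsearch.test.MockLogAppender, as already used by the tests above.

    // Illustration only (assumes a top-level class, so getName() matches getCanonicalName()).
    Logger clusterLogger = LogManager.getLogger(ClusterApplierService.class);
    assert clusterLogger.getName().equals(ClusterApplierService.class.getCanonicalName());
    // The expectation's logger name and the appender's target logger must agree,
    // otherwise the SeenEventExpectation never matches and the test fails spuriously.
    mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation(
        "sample", ClusterApplierService.class.getCanonicalName(), Level.DEBUG, "*processing*"));
    Loggers.addAppender(clusterLogger, mockAppender);
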
TestUnicastZenPing(Settings settings, ThreadPool threadPool, NetworkHandle networkHandle, PingContextProvider contextProvider) { super(Settings.builder().put("node.name", networkHandle.node.getName()).put(settings).build(), diff --git a/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java b/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java index 49e6c6597e180..6bb799ac9ebb0 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java @@ -45,8 +45,10 @@ public class IndexingSlowLogTests extends ESTestCase { public void testSlowLogParsedDocumentPrinterSourceToLog() throws IOException { - BytesReference source = BytesReference.bytes(JsonXContent.contentBuilder().startObject().field("foo", "bar").endObject()); - ParsedDocument pd = new ParsedDocument(new NumericDocValuesField("version", 1), SeqNoFieldMapper.SequenceIDFields.emptySeqID(), "id", + BytesReference source = BytesReference.bytes(JsonXContent.contentBuilder() + .startObject().field("foo", "bar").endObject()); + ParsedDocument pd = new ParsedDocument(new NumericDocValuesField("version", 1), + SeqNoFieldMapper.SequenceIDFields.emptySeqID(), "id", "test", null, null, source, XContentType.JSON, null); Index index = new Index("foo", "123"); // Turning off document logging doesn't log source[] @@ -68,7 +70,8 @@ public void testSlowLogParsedDocumentPrinterSourceToLog() throws IOException { // Throwing a error if source cannot be converted source = new BytesArray("invalid"); - pd = new ParsedDocument(new NumericDocValuesField("version", 1), SeqNoFieldMapper.SequenceIDFields.emptySeqID(), "id", + pd = new ParsedDocument(new NumericDocValuesField("version", 1), + SeqNoFieldMapper.SequenceIDFields.emptySeqID(), "id", "test", null, null, source, XContentType.JSON, null); p = new SlowLogParsedDocumentPrinter(index, pd, 10, true, 3); @@ -91,10 +94,12 @@ public void testReformatSetting() { IndexSettings settings = new IndexSettings(metaData, Settings.EMPTY); IndexingSlowLog log = new IndexingSlowLog(settings); assertFalse(log.isReformat()); - settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING.getKey(), "true").build())); + settings.updateIndexMetaData(newIndexMeta("index", + Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING.getKey(), "true").build())); assertTrue(log.isReformat()); - settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING.getKey(), "false").build())); + settings.updateIndexMetaData(newIndexMeta("index", + Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING.getKey(), "false").build())); assertFalse(log.isReformat()); settings.updateIndexMetaData(newIndexMeta("index", Settings.EMPTY)); @@ -107,7 +112,8 @@ public void testReformatSetting() { log = new IndexingSlowLog(settings); assertTrue(log.isReformat()); try { - settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING.getKey(), "NOT A BOOLEAN").build())); + settings.updateIndexMetaData(newIndexMeta("index", + Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_REFORMAT_SETTING.getKey(), "NOT A BOOLEAN").build())); fail(); } catch (IllegalArgumentException ex) { final String expected = "illegal value can't update [index.indexing.slowlog.reformat] from [true] 
to [NOT A BOOLEAN]"; @@ -115,7 +121,8 @@ public void testReformatSetting() { assertNotNull(ex.getCause()); assertThat(ex.getCause(), instanceOf(IllegalArgumentException.class)); final IllegalArgumentException cause = (IllegalArgumentException) ex.getCause(); - assertThat(cause, hasToString(containsString("Failed to parse value [NOT A BOOLEAN] as only [true] or [false] are allowed."))); + assertThat(cause, + hasToString(containsString("Failed to parse value [NOT A BOOLEAN] as only [true] or [false] are allowed."))); } assertTrue(log.isReformat()); } @@ -130,14 +137,17 @@ public void testLevelSetting() { IndexingSlowLog log = new IndexingSlowLog(settings); assertEquals(level, log.getLevel()); level = randomFrom(SlowLogLevel.values()); - settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING.getKey(), level).build())); + settings.updateIndexMetaData(newIndexMeta("index", + Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING.getKey(), level).build())); assertEquals(level, log.getLevel()); level = randomFrom(SlowLogLevel.values()); - settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING.getKey(), level).build())); + settings.updateIndexMetaData(newIndexMeta("index", + Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING.getKey(), level).build())); assertEquals(level, log.getLevel()); - settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING.getKey(), level).build())); + settings.updateIndexMetaData(newIndexMeta("index", + Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING.getKey(), level).build())); assertEquals(level, log.getLevel()); settings.updateIndexMetaData(newIndexMeta("index", Settings.EMPTY)); @@ -150,7 +160,8 @@ public void testLevelSetting() { log = new IndexingSlowLog(settings); assertTrue(log.isReformat()); try { - settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING.getKey(), "NOT A LEVEL").build())); + settings.updateIndexMetaData(newIndexMeta("index", + Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_LEVEL_SETTING.getKey(), "NOT A LEVEL").build())); fail(); } catch (IllegalArgumentException ex) { final String expected = "illegal value can't update [index.indexing.slowlog.level] from [TRACE] to [NOT A LEVEL]"; @@ -178,7 +189,8 @@ public void testSetLevels() { assertEquals(TimeValue.timeValueMillis(300).nanos(), log.getIndexInfoThreshold()); assertEquals(TimeValue.timeValueMillis(400).nanos(), log.getIndexWarnThreshold()); - settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING.getKey(), "120ms") + settings.updateIndexMetaData(newIndexMeta("index", + Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING.getKey(), "120ms") .put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING.getKey(), "220ms") .put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING.getKey(), "320ms") .put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING.getKey(), "420ms").build())); @@ -206,28 +218,36 @@ public void testSetLevels() { assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getIndexInfoThreshold()); assertEquals(TimeValue.timeValueMillis(-1).nanos(), 
log.getIndexWarnThreshold()); try { - settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING.getKey(), "NOT A TIME VALUE").build())); + settings.updateIndexMetaData(newIndexMeta("index", + Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING.getKey(), "NOT A TIME VALUE") + .build())); fail(); } catch (IllegalArgumentException ex) { assertTimeValueException(ex, "index.indexing.slowlog.threshold.index.trace"); } try { - settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING.getKey(), "NOT A TIME VALUE").build())); + settings.updateIndexMetaData(newIndexMeta("index", + Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_DEBUG_SETTING.getKey(), "NOT A TIME VALUE") + .build())); fail(); } catch (IllegalArgumentException ex) { assertTimeValueException(ex, "index.indexing.slowlog.threshold.index.debug"); } try { - settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING.getKey(), "NOT A TIME VALUE").build())); + settings.updateIndexMetaData(newIndexMeta("index", + Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_INFO_SETTING.getKey(), "NOT A TIME VALUE") + .build())); fail(); } catch (IllegalArgumentException ex) { assertTimeValueException(ex, "index.indexing.slowlog.threshold.index.info"); } try { - settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING.getKey(), "NOT A TIME VALUE").build())); + settings.updateIndexMetaData(newIndexMeta("index", + Settings.builder().put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_WARN_SETTING.getKey(), "NOT A TIME VALUE") + .build())); fail(); } catch (IllegalArgumentException ex) { assertTimeValueException(ex, "index.indexing.slowlog.threshold.index.warn"); diff --git a/server/src/test/java/org/elasticsearch/index/MergePolicySettingsTests.java b/server/src/test/java/org/elasticsearch/index/MergePolicySettingsTests.java index 68869592485e5..ee306c0cdf182 100644 --- a/server/src/test/java/org/elasticsearch/index/MergePolicySettingsTests.java +++ b/server/src/test/java/org/elasticsearch/index/MergePolicySettingsTests.java @@ -40,11 +40,16 @@ public void testCompoundFileSettings() throws IOException { assertThat(new MergePolicyConfig(logger, indexSettings(build(true))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); assertThat(new MergePolicyConfig(logger, indexSettings(build(0.5))).getMergePolicy().getNoCFSRatio(), equalTo(0.5)); assertThat(new MergePolicyConfig(logger, indexSettings(build(1.0))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build("true"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build("True"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build("False"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build("false"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); - assertThat(new MergePolicyConfig(logger, indexSettings(build(false))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new MergePolicyConfig(logger, + 
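An aside on the exception-assertion style in the slow-log hunks above: the change only re-wraps the existing try { ...; fail(); } catch (IllegalArgumentException ex) idiom to satisfy the line-length limit. A sketch of the equivalent expectThrows form, which the MergePolicySettingsTests hunk below already uses, could look like the following; the setting key and the assertTimeValueException helper are the ones that appear in the test above, shown here purely for illustration.

    // Illustration only; mirrors the "NOT A TIME VALUE" case from IndexingSlowLogTests.
    IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () ->
        settings.updateIndexMetaData(newIndexMeta("index",
            Settings.builder()
                .put(IndexingSlowLog.INDEX_INDEXING_SLOWLOG_THRESHOLD_INDEX_TRACE_SETTING.getKey(), "NOT A TIME VALUE")
                .build())));
    assertTimeValueException(ex, "index.indexing.slowlog.threshold.index.trace");

Keeping the try/fail/catch form in this change limits the diff to formatting, which keeps the checkstyle cleanup easy to review.
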
indexSettings(build("true"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); + assertThat(new MergePolicyConfig(logger, + indexSettings(build("True"))).getMergePolicy().getNoCFSRatio(), equalTo(1.0)); + assertThat(new MergePolicyConfig(logger, + indexSettings(build("False"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new MergePolicyConfig(logger, + indexSettings(build("false"))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); + assertThat(new MergePolicyConfig(logger, + indexSettings(build(false))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); assertThat(new MergePolicyConfig(logger, indexSettings(build(0))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); assertThat(new MergePolicyConfig(logger, indexSettings(build(0.0))).getMergePolicy().getNoCFSRatio(), equalTo(0.0)); } @@ -54,7 +59,8 @@ private static IndexSettings indexSettings(Settings settings) { } public void testNoMerges() { - MergePolicyConfig mp = new MergePolicyConfig(logger, indexSettings(Settings.builder().put(MergePolicyConfig.INDEX_MERGE_ENABLED, false).build())); + MergePolicyConfig mp = new MergePolicyConfig(logger, + indexSettings(Settings.builder().put(MergePolicyConfig.INDEX_MERGE_ENABLED, false).build())); assertTrue(mp.getMergePolicy() instanceof NoMergePolicy); } @@ -76,47 +82,81 @@ public void testUpdateSettings() throws IOException { public void testTieredMergePolicySettingsUpdate() throws IOException { IndexSettings indexSettings = indexSettings(Settings.EMPTY); - assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d); - - indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING.getKey(), MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d).build())); - assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d, 0.0d); - - assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMbFrac(), 0); - indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(), new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB)).build())); - assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB).getMbFrac(), 0.001); - - assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE); - indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE - 1).build())); - assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE - 1); - - assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnceExplicit(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT); - indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING.getKey(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT - 1).build())); - 
assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnceExplicit(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT-1); - - assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getMbFrac(), 0.0001); - indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING.getKey(), new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1)).build())); - assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), 0.0001); - - assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER, 0); - indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER + 1).build())); - assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER + 1, 0); - - assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(), MergePolicyConfig.DEFAULT_DELETES_PCT_ALLOWED, 0); - indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 22).build())); + assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), + MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d); + + indexSettings.updateIndexMetaData(newIndexMeta("index", + Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_EXPUNGE_DELETES_ALLOWED_SETTING.getKey(), + MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d).build())); + assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), + MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED + 1.0d, 0.0d); + + assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), + MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMbFrac(), 0); + indexSettings.updateIndexMetaData(newIndexMeta("index", + Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_FLOOR_SEGMENT_SETTING.getKey(), + new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB)).build())); + assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), + new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb() + 1, ByteSizeUnit.MB).getMbFrac(), 0.001); + + assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), + MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE); + indexSettings.updateIndexMetaData(newIndexMeta("index", + Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_SETTING.getKey(), + MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE - 1).build())); + assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), + MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE - 1); + + assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnceExplicit(), + MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT); + indexSettings.updateIndexMetaData(newIndexMeta("index", + 
Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE_EXPLICIT_SETTING.getKey(), + MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT - 1).build())); + assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnceExplicit(), + MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT-1); + + assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), + MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getMbFrac(), 0.0001); + indexSettings.updateIndexMetaData(newIndexMeta("index", + Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_MAX_MERGED_SEGMENT_SETTING.getKey(), + new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1)).build())); + assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), + new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), 0.0001); + + assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), + MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER, 0); + indexSettings.updateIndexMetaData(newIndexMeta("index", + Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_SEGMENTS_PER_TIER_SETTING.getKey(), + MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER + 1).build())); + assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), + MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER + 1, 0); + + assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(), + MergePolicyConfig.DEFAULT_DELETES_PCT_ALLOWED, 0); + indexSettings.updateIndexMetaData(newIndexMeta("index", + Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 22).build())); assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(), 22, 0); IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> - indexSettings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 53).build()))); + indexSettings.updateIndexMetaData(newIndexMeta("index", + Settings.builder().put(MergePolicyConfig.INDEX_MERGE_POLICY_DELETES_PCT_ALLOWED_SETTING.getKey(), 53).build()))); final Throwable cause = exc.getCause(); assertThat(cause.getMessage(), containsString("must be <= 50.0")); indexSettings.updateIndexMetaData(newIndexMeta("index", EMPTY_SETTINGS)); // see if defaults are restored - assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d); - assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb(), ByteSizeUnit.MB).getMbFrac(), 0.00); - assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE); - assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnceExplicit(), MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT); - assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), 0.0001); - assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER, 0); - assertEquals(((EsTieredMergePolicy) 
indexSettings.getMergePolicy()).getDeletesPctAllowed(), MergePolicyConfig.DEFAULT_DELETES_PCT_ALLOWED, 0); + assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getForceMergeDeletesPctAllowed(), + MergePolicyConfig.DEFAULT_EXPUNGE_DELETES_ALLOWED, 0.0d); + assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getFloorSegmentMB(), + new ByteSizeValue(MergePolicyConfig.DEFAULT_FLOOR_SEGMENT.getMb(), ByteSizeUnit.MB).getMbFrac(), 0.00); + assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnce(), + MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE); + assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergeAtOnceExplicit(), + MergePolicyConfig.DEFAULT_MAX_MERGE_AT_ONCE_EXPLICIT); + assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getMaxMergedSegmentMB(), + new ByteSizeValue(MergePolicyConfig.DEFAULT_MAX_MERGED_SEGMENT.getBytes() + 1).getMbFrac(), 0.0001); + assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getSegmentsPerTier(), + MergePolicyConfig.DEFAULT_SEGMENTS_PER_TIER, 0); + assertEquals(((EsTieredMergePolicy) indexSettings.getMergePolicy()).getDeletesPctAllowed(), + MergePolicyConfig.DEFAULT_DELETES_PCT_ALLOWED, 0); } public Settings build(String value) { diff --git a/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java b/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java index adb7a087367d2..8d547c617e55b 100644 --- a/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java +++ b/server/src/test/java/org/elasticsearch/index/SearchSlowLogTests.java @@ -170,7 +170,8 @@ public void testSlowLogSearchContextPrinterToLog() throws IOException { SearchContext searchContext = createSearchContext(index); SearchSourceBuilder source = SearchSourceBuilder.searchSource().query(QueryBuilders.matchAllQuery()); searchContext.request().source(source); - searchContext.setTask(new SearchTask(0, "n/a", "n/a", "test", null, Collections.singletonMap(Task.X_OPAQUE_ID, "my_id"))); + searchContext.setTask(new SearchTask(0, "n/a", "n/a", "test", null, + Collections.singletonMap(Task.X_OPAQUE_ID, "my_id"))); SearchSlowLog.SlowLogSearchContextPrinter p = new SearchSlowLog.SlowLogSearchContextPrinter(searchContext, 10); assertThat(p.toString(), startsWith("[foo][0]")); // Makes sure that output doesn't contain any new lines @@ -188,14 +189,17 @@ public void testLevelSetting() { SearchSlowLog log = new SearchSlowLog(settings); assertEquals(level, log.getLevel()); level = randomFrom(SlowLogLevel.values()); - settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL.getKey(), level).build())); + settings.updateIndexMetaData(newIndexMeta("index", + Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL.getKey(), level).build())); assertEquals(level, log.getLevel()); level = randomFrom(SlowLogLevel.values()); - settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL.getKey(), level).build())); + settings.updateIndexMetaData(newIndexMeta("index", + Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL.getKey(), level).build())); assertEquals(level, log.getLevel()); - settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL.getKey(), level).build())); + settings.updateIndexMetaData(newIndexMeta("index", + 
Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL.getKey(), level).build())); assertEquals(level, log.getLevel()); settings.updateIndexMetaData(newIndexMeta("index", Settings.EMPTY)); @@ -207,7 +211,8 @@ public void testLevelSetting() { settings = new IndexSettings(metaData, Settings.EMPTY); log = new SearchSlowLog(settings); try { - settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL.getKey(), "NOT A LEVEL").build())); + settings.updateIndexMetaData(newIndexMeta("index", + Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_LEVEL.getKey(), "NOT A LEVEL").build())); fail(); } catch (IllegalArgumentException ex) { final String expected = "illegal value can't update [index.search.slowlog.level] from [TRACE] to [NOT A LEVEL]"; @@ -235,7 +240,8 @@ public void testSetQueryLevels() { assertEquals(TimeValue.timeValueMillis(300).nanos(), log.getQueryInfoThreshold()); assertEquals(TimeValue.timeValueMillis(400).nanos(), log.getQueryWarnThreshold()); - settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING.getKey(), "120ms") + settings.updateIndexMetaData(newIndexMeta("index", + Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING.getKey(), "120ms") .put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING.getKey(), "220ms") .put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING.getKey(), "320ms") .put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING.getKey(), "420ms").build())); @@ -263,28 +269,36 @@ public void testSetQueryLevels() { assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getQueryInfoThreshold()); assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getQueryWarnThreshold()); try { - settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING.getKey(), "NOT A TIME VALUE").build())); + settings.updateIndexMetaData(newIndexMeta("index", + Settings.builder() + .put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_TRACE_SETTING.getKey(), "NOT A TIME VALUE").build())); fail(); } catch (IllegalArgumentException ex) { assertTimeValueException(ex, "index.search.slowlog.threshold.query.trace"); } try { - settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING.getKey(), "NOT A TIME VALUE").build())); + settings.updateIndexMetaData(newIndexMeta("index", + Settings.builder() + .put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_DEBUG_SETTING.getKey(), "NOT A TIME VALUE").build())); fail(); } catch (IllegalArgumentException ex) { assertTimeValueException(ex, "index.search.slowlog.threshold.query.debug"); } try { - settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING.getKey(), "NOT A TIME VALUE").build())); + settings.updateIndexMetaData(newIndexMeta("index", + Settings.builder() + .put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_INFO_SETTING.getKey(), "NOT A TIME VALUE").build())); fail(); } catch (IllegalArgumentException ex) { assertTimeValueException(ex, "index.search.slowlog.threshold.query.info"); } try { - settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING.getKey(), "NOT A TIME VALUE").build())); + 
settings.updateIndexMetaData(newIndexMeta("index", + Settings.builder() + .put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_QUERY_WARN_SETTING.getKey(), "NOT A TIME VALUE").build())); fail(); } catch (IllegalArgumentException ex) { assertTimeValueException(ex, "index.search.slowlog.threshold.query.warn"); @@ -306,7 +320,8 @@ public void testSetFetchLevels() { assertEquals(TimeValue.timeValueMillis(300).nanos(), log.getFetchInfoThreshold()); assertEquals(TimeValue.timeValueMillis(400).nanos(), log.getFetchWarnThreshold()); - settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING.getKey(), "120ms") + settings.updateIndexMetaData(newIndexMeta("index", + Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING.getKey(), "120ms") .put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING.getKey(), "220ms") .put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING.getKey(), "320ms") .put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING.getKey(), "420ms").build())); @@ -334,28 +349,36 @@ public void testSetFetchLevels() { assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getFetchInfoThreshold()); assertEquals(TimeValue.timeValueMillis(-1).nanos(), log.getFetchWarnThreshold()); try { - settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING.getKey(), "NOT A TIME VALUE").build())); + settings.updateIndexMetaData(newIndexMeta("index", + Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_TRACE_SETTING.getKey(), + "NOT A TIME VALUE").build())); fail(); } catch (IllegalArgumentException ex) { assertTimeValueException(ex, "index.search.slowlog.threshold.fetch.trace"); } try { - settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING.getKey(), "NOT A TIME VALUE").build())); + settings.updateIndexMetaData(newIndexMeta("index", + Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING.getKey(), + "NOT A TIME VALUE").build())); fail(); } catch (IllegalArgumentException ex) { assertTimeValueException(ex, "index.search.slowlog.threshold.fetch.debug"); } try { - settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING.getKey(), "NOT A TIME VALUE").build())); + settings.updateIndexMetaData(newIndexMeta("index", + Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_INFO_SETTING.getKey(), + "NOT A TIME VALUE").build())); fail(); } catch (IllegalArgumentException ex) { assertTimeValueException(ex, "index.search.slowlog.threshold.fetch.info"); } try { - settings.updateIndexMetaData(newIndexMeta("index", Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING.getKey(), "NOT A TIME VALUE").build())); + settings.updateIndexMetaData(newIndexMeta("index", + Settings.builder().put(SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING.getKey(), + "NOT A TIME VALUE").build())); fail(); } catch (IllegalArgumentException ex) { assertTimeValueException(ex, "index.search.slowlog.threshold.fetch.warn"); diff --git a/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java b/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java index 1e1fb42ae7637..48dcb8271044e 
100644 --- a/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java +++ b/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java @@ -86,11 +86,12 @@ public void testThatAnalyzersAreUsedInMapping() throws IOException { Version randomVersion = randomVersion(random()); Settings indexSettings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, randomVersion).build(); - NamedAnalyzer namedAnalyzer = new PreBuiltAnalyzerProvider(analyzerName, AnalyzerScope.INDEX, randomPreBuiltAnalyzer.getAnalyzer(randomVersion)).get(); + NamedAnalyzer namedAnalyzer = new PreBuiltAnalyzerProvider(analyzerName, AnalyzerScope.INDEX, + randomPreBuiltAnalyzer.getAnalyzer(randomVersion)).get(); XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("properties").startObject("field").field("type", "text").field("analyzer", analyzerName).endObject().endObject() - .endObject().endObject(); + .startObject("properties").startObject("field").field("type", "text") + .field("analyzer", analyzerName).endObject().endObject().endObject().endObject(); MapperService mapperService = createIndex("test", indexSettings, "type", mapping).mapperService(); MappedFieldType fieldType = mapperService.fullName("field"); diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineMergeIT.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineMergeIT.java index 110e34d59a91c..1bf928fcb8060 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineMergeIT.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineMergeIT.java @@ -55,24 +55,32 @@ public void testMergesHappening() throws InterruptedException, IOException, Exec final int numDocs = scaledRandomIntBetween(100, 1000); BulkRequestBuilder request = client().prepareBulk(); for (int j = 0; j < numDocs; ++j) { - request.add(Requests.indexRequest("test").type("type1").id(Long.toString(id++)).source(jsonBuilder().startObject().field("l", randomLong()).endObject())); + request.add(Requests.indexRequest("test").type("type1").id(Long.toString(id++)) + .source(jsonBuilder().startObject().field("l", randomLong()).endObject())); } BulkResponse response = request.execute().actionGet(); refresh(); assertNoFailures(response); - IndicesStatsResponse stats = client().admin().indices().prepareStats("test").setSegments(true).setMerge(true).get(); - logger.info("index round [{}] - segments {}, total merges {}, current merge {}", i, stats.getPrimaries().getSegments().getCount(), stats.getPrimaries().getMerge().getTotal(), stats.getPrimaries().getMerge().getCurrent()); + IndicesStatsResponse stats = client().admin().indices().prepareStats("test") + .setSegments(true).setMerge(true).get(); + logger.info("index round [{}] - segments {}, total merges {}, current merge {}", + i, stats.getPrimaries().getSegments().getCount(), stats.getPrimaries().getMerge().getTotal(), + stats.getPrimaries().getMerge().getCurrent()); } final long upperNumberSegments = 2 * numOfShards * 10; awaitBusy(() -> { IndicesStatsResponse stats = client().admin().indices().prepareStats().setSegments(true).setMerge(true).get(); - logger.info("numshards {}, segments {}, total merges {}, current merge {}", numOfShards, stats.getPrimaries().getSegments().getCount(), stats.getPrimaries().getMerge().getTotal(), stats.getPrimaries().getMerge().getCurrent()); + logger.info("numshards {}, segments {}, total merges {}, current merge {}", numOfShards, + 
stats.getPrimaries().getSegments().getCount(), stats.getPrimaries().getMerge().getTotal(), + stats.getPrimaries().getMerge().getCurrent()); long current = stats.getPrimaries().getMerge().getCurrent(); long count = stats.getPrimaries().getSegments().getCount(); return count < upperNumberSegments && current == 0; }); IndicesStatsResponse stats = client().admin().indices().prepareStats().setSegments(true).setMerge(true).get(); - logger.info("numshards {}, segments {}, total merges {}, current merge {}", numOfShards, stats.getPrimaries().getSegments().getCount(), stats.getPrimaries().getMerge().getTotal(), stats.getPrimaries().getMerge().getCurrent()); + logger.info("numshards {}, segments {}, total merges {}, current merge {}", numOfShards, + stats.getPrimaries().getSegments().getCount(), stats.getPrimaries().getMerge().getTotal(), + stats.getPrimaries().getMerge().getCurrent()); long count = stats.getPrimaries().getSegments().getCount(); assertThat(count, Matchers.lessThanOrEqualTo(upperNumberSegments)); } diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index d5dfe5e71fbdb..8072e69520d64 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -315,11 +315,15 @@ public void testSegments() throws Exception { segments = engine.segments(false); assertThat(segments.size(), equalTo(2)); assertThat(engine.segmentsStats(false).getCount(), equalTo(2L)); - assertThat(engine.segmentsStats(false).getTermsMemoryInBytes(), greaterThan(stats.getTermsMemoryInBytes())); - assertThat(engine.segmentsStats(false).getStoredFieldsMemoryInBytes(), greaterThan(stats.getStoredFieldsMemoryInBytes())); + assertThat(engine.segmentsStats(false).getTermsMemoryInBytes(), + greaterThan(stats.getTermsMemoryInBytes())); + assertThat(engine.segmentsStats(false).getStoredFieldsMemoryInBytes(), + greaterThan(stats.getStoredFieldsMemoryInBytes())); assertThat(engine.segmentsStats(false).getTermVectorsMemoryInBytes(), equalTo(0L)); - assertThat(engine.segmentsStats(false).getNormsMemoryInBytes(), greaterThan(stats.getNormsMemoryInBytes())); - assertThat(engine.segmentsStats(false).getDocValuesMemoryInBytes(), greaterThan(stats.getDocValuesMemoryInBytes())); + assertThat(engine.segmentsStats(false).getNormsMemoryInBytes(), + greaterThan(stats.getNormsMemoryInBytes())); + assertThat(engine.segmentsStats(false).getDocValuesMemoryInBytes(), + greaterThan(stats.getDocValuesMemoryInBytes())); assertThat(segments.get(0).getGeneration() < segments.get(1).getGeneration(), equalTo(true)); assertThat(segments.get(0).isCommitted(), equalTo(true)); assertThat(segments.get(0).isSearch(), equalTo(true)); @@ -531,7 +535,9 @@ public void testSegmentsWithIndexSort() throws Exception { Sort indexSort = new Sort(new SortedSetSortField("_type", false)); try (Store store = createStore(); Engine engine = - createEngine(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE, null, null, null, indexSort, null)) { + createEngine(defaultSettings, store, createTempDir(), + NoMergePolicy.INSTANCE, null, null, null, + indexSort, null)) { List segments = engine.segments(true); assertThat(segments.isEmpty(), equalTo(true)); @@ -634,7 +640,8 @@ public long getCheckpoint() { assertThat( stats2.getUserData().get(Translog.TRANSLOG_GENERATION_KEY), 
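On the InternalEngineMergeIT changes that end just above: the test polls for merges to settle rather than sleeping for a fixed interval, which keeps it fast on quick machines and tolerant on slow ones. A minimal sketch of that idiom, assuming only what the call site shows, namely that ESTestCase's awaitBusy re-evaluates the supplied boolean condition until it returns true or the helper gives up:

    // Illustration only; the names mirror the test above.
    awaitBusy(() -> {
        IndicesStatsResponse stats = client().admin().indices().prepareStats()
            .setSegments(true).setMerge(true).get();
        long current = stats.getPrimaries().getMerge().getCurrent(); // merges still in flight
        long count = stats.getPrimaries().getSegments().getCount();  // segments remaining
        // Done once merging has gone quiet and the segment count has come down.
        return count < upperNumberSegments && current == 0;
    });
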
not(equalTo(stats1.getUserData().get(Translog.TRANSLOG_GENERATION_KEY)))); - assertThat(stats2.getUserData().get(Translog.TRANSLOG_UUID_KEY), equalTo(stats1.getUserData().get(Translog.TRANSLOG_UUID_KEY))); + assertThat(stats2.getUserData().get(Translog.TRANSLOG_UUID_KEY), + equalTo(stats1.getUserData().get(Translog.TRANSLOG_UUID_KEY))); assertThat(Long.parseLong(stats2.getUserData().get(SequenceNumbers.LOCAL_CHECKPOINT_KEY)), equalTo(localCheckpoint.get())); assertThat(stats2.getUserData(), hasKey(SequenceNumbers.MAX_SEQ_NO)); assertThat(Long.parseLong(stats2.getUserData().get(SequenceNumbers.MAX_SEQ_NO)), equalTo(maxSeqNo.get())); @@ -700,11 +707,15 @@ public void testTranslogMultipleOperationsSameDocument() throws IOException { for (int i = 0; i < ops; i++) { final ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), SOURCE, null); if (randomBoolean()) { - final Engine.Index operation = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, i, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false); + final Engine.Index operation = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, + 0, i, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), + -1, false); operations.add(operation); initialEngine.index(operation); } else { - final Engine.Delete operation = new Engine.Delete("test", "1", newUid(doc), SequenceNumbers.UNASSIGNED_SEQ_NO, 0, i, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime()); + final Engine.Delete operation = new Engine.Delete("test", "1", newUid(doc), + SequenceNumbers.UNASSIGNED_SEQ_NO, 0, i, VersionType.EXTERNAL, + Engine.Operation.Origin.PRIMARY, System.nanoTime()); operations.add(operation); initialEngine.delete(operation); } @@ -800,13 +811,15 @@ public void testTranslogRecoveryWithMultipleGenerations() throws IOException { public void testRecoveryFromTranslogUpToSeqNo() throws IOException { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); try (Store store = createStore()) { - EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get); + EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), + null, null, globalCheckpoint::get); final long maxSeqNo; try (InternalEngine engine = createEngine(config)) { final int docs = randomIntBetween(1, 100); for (int i = 0; i < docs; i++) { final String id = Integer.toString(i); - final ParsedDocument doc = testParsedDocument(id, null, testDocumentWithTextField(), SOURCE, null); + final ParsedDocument doc = testParsedDocument(id, null, testDocumentWithTextField(), + SOURCE, null); engine.index(indexForDoc(doc)); if (rarely()) { engine.rollTranslogGeneration(); @@ -887,7 +900,8 @@ public void testSimpleOperations() throws Exception { // its not there... searchResult = engine.acquireSearcher("test"); MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); + MatcherAssert.assertThat(searchResult, + EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); searchResult.close(); // but, not there non realtime @@ -913,7 +927,8 @@ public void testSimpleOperations() throws Exception { // now its there... 
searchResult = engine.acquireSearcher("test"); MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); + MatcherAssert.assertThat(searchResult, + EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); searchResult.close(); // also in non realtime @@ -932,8 +947,10 @@ public void testSimpleOperations() throws Exception { // its not updated yet... searchResult = engine.acquireSearcher("test"); MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0)); + MatcherAssert.assertThat(searchResult, + EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); + MatcherAssert.assertThat(searchResult, + EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0)); searchResult.close(); // but, we can still get it (in realtime) @@ -947,8 +964,10 @@ public void testSimpleOperations() throws Exception { searchResult = engine.acquireSearcher("test"); MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1)); + MatcherAssert.assertThat(searchResult, + EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); + MatcherAssert.assertThat(searchResult, + EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1)); searchResult.close(); // now delete @@ -957,8 +976,10 @@ public void testSimpleOperations() throws Exception { // its not deleted yet searchResult = engine.acquireSearcher("test"); MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1)); + MatcherAssert.assertThat(searchResult, + EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); + MatcherAssert.assertThat(searchResult, + EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1)); searchResult.close(); // but, get should not see it (in realtime) @@ -971,8 +992,10 @@ public void testSimpleOperations() throws Exception { searchResult = engine.acquireSearcher("test"); MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - MatcherAssert.assertThat(searchResult, 
EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0)); + MatcherAssert.assertThat(searchResult, + EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); + MatcherAssert.assertThat(searchResult, + EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0)); searchResult.close(); // add it back @@ -984,8 +1007,10 @@ public void testSimpleOperations() throws Exception { // its not there... searchResult = engine.acquireSearcher("test"); MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0)); + MatcherAssert.assertThat(searchResult, + EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); + MatcherAssert.assertThat(searchResult, + EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0)); searchResult.close(); // refresh and it should be there @@ -994,8 +1019,10 @@ public void testSimpleOperations() throws Exception { // now its there... searchResult = engine.acquireSearcher("test"); MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0)); + MatcherAssert.assertThat(searchResult, + EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); + MatcherAssert.assertThat(searchResult, + EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0)); searchResult.close(); // now flush @@ -1017,8 +1044,10 @@ public void testSimpleOperations() throws Exception { // its not updated yet... 
searchResult = engine.acquireSearcher("test"); MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0)); + MatcherAssert.assertThat(searchResult, + EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); + MatcherAssert.assertThat(searchResult, + EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 0)); searchResult.close(); // refresh and it should be updated @@ -1026,8 +1055,10 @@ public void testSimpleOperations() throws Exception { searchResult = engine.acquireSearcher("test"); MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1)); + MatcherAssert.assertThat(searchResult, + EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); + MatcherAssert.assertThat(searchResult, + EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test1")), 1)); searchResult.close(); } @@ -1043,7 +1074,8 @@ public void testSearchResultRelease() throws Exception { // its not there... searchResult = engine.acquireSearcher("test"); MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); + MatcherAssert.assertThat(searchResult, + EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 0)); searchResult.close(); // refresh and it should be there @@ -1052,7 +1084,8 @@ public void testSearchResultRelease() throws Exception { // now its there... searchResult = engine.acquireSearcher("test"); MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); + MatcherAssert.assertThat(searchResult, + EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); // don't release the search result yet... // delete, refresh and do a new search, it should not be there @@ -1064,7 +1097,8 @@ public void testSearchResultRelease() throws Exception { // the non release search result should not see the deleted yet... 
MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); + MatcherAssert.assertThat(searchResult, + EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(new TermQuery(new Term("value", "test")), 1)); searchResult.close(); } @@ -1074,7 +1108,8 @@ public void testCommitAdvancesMinTranslogForRecovery() throws IOException { store = createStore(); final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); final LongSupplier globalCheckpointSupplier = () -> globalCheckpoint.get(); - engine = createEngine(config(defaultSettings, store, translogPath, newMergePolicy(), null, null, globalCheckpointSupplier)); + engine = createEngine(config(defaultSettings, store, translogPath, newMergePolicy(), null, null, + globalCheckpointSupplier)); ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), B_1, null); engine.index(indexForDoc(doc)); boolean inSync = randomBoolean(); @@ -1118,8 +1153,8 @@ public void testSyncedFlush() throws IOException { assertEquals("should fail to sync flush with wrong id (but no docs)", engine.syncFlush(syncId + "1", wrongId), Engine.SyncedFlushResult.COMMIT_MISMATCH); engine.index(indexForDoc(doc)); - assertEquals("should fail to sync flush with right id but pending doc", engine.syncFlush(syncId + "2", commitID), - Engine.SyncedFlushResult.PENDING_OPERATIONS); + assertEquals("should fail to sync flush with right id but pending doc", + engine.syncFlush(syncId + "2", commitID), Engine.SyncedFlushResult.PENDING_OPERATIONS); commitID = engine.flush(); assertEquals("should succeed to flush commit with right id and no pending doc", engine.syncFlush(syncId, commitID), Engine.SyncedFlushResult.SUCCESS); @@ -1135,18 +1170,24 @@ public void testRenewSyncFlush() throws Exception { InternalEngine engine = createEngine(config(defaultSettings, store, createTempDir(), new LogDocMergePolicy(), null))) { final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20); - Engine.Index doc1 = indexForDoc(testParsedDocument("1", null, testDocumentWithTextField(), B_1, null)); + Engine.Index doc1 = + indexForDoc(testParsedDocument("1", null, testDocumentWithTextField(), B_1, null)); engine.index(doc1); assertEquals(engine.getLastWriteNanos(), doc1.startTime()); engine.flush(); - Engine.Index doc2 = indexForDoc(testParsedDocument("2", null, testDocumentWithTextField(), B_1, null)); + Engine.Index doc2 = + indexForDoc(testParsedDocument("2", null, testDocumentWithTextField(), B_1, null)); engine.index(doc2); assertEquals(engine.getLastWriteNanos(), doc2.startTime()); engine.flush(); final boolean forceMergeFlushes = randomBoolean(); - final ParsedDocument parsedDoc3 = testParsedDocument("3", null, testDocumentWithTextField(), B_1, null); + final ParsedDocument parsedDoc3 = + testParsedDocument("3", null, testDocumentWithTextField(), B_1, null); if (forceMergeFlushes) { - engine.index(new Engine.Index(newUid(parsedDoc3), parsedDoc3, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime() - engine.engineConfig.getFlushMergesAfter().nanos(), -1, false)); + engine.index(new Engine.Index(newUid(parsedDoc3), parsedDoc3, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, + Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, + System.nanoTime() - 
engine.engineConfig.getFlushMergesAfter().nanos(), + -1, false)); } else { engine.index(indexForDoc(parsedDoc3)); } @@ -1155,7 +1196,8 @@ public void testRenewSyncFlush() throws Exception { Engine.SyncedFlushResult.SUCCESS); assertEquals(3, engine.segments(false).size()); - engine.forceMerge(forceMergeFlushes, 1, false, false, false); + engine.forceMerge(forceMergeFlushes, 1, false, + false, false); if (forceMergeFlushes == false) { engine.refresh("make all segments visible"); assertEquals(4, engine.segments(false).size()); @@ -1171,7 +1213,8 @@ public void testRenewSyncFlush() throws Exception { assertEquals(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID), syncId); if (randomBoolean()) { - Engine.Index doc4 = indexForDoc(testParsedDocument("4", null, testDocumentWithTextField(), B_1, null)); + Engine.Index doc4 = + indexForDoc(testParsedDocument("4", null, testDocumentWithTextField(), B_1, null)); engine.index(doc4); assertEquals(engine.getLastWriteNanos(), doc4.startTime()); } else { @@ -1180,7 +1223,8 @@ public void testRenewSyncFlush() throws Exception { assertEquals(engine.getLastWriteNanos(), delete.startTime()); } assertFalse(engine.tryRenewSyncCommit()); - engine.flush(false, true); // we might hit a concurrent flush from a finishing merge here - just wait if ongoing... + // we might hit a concurrent flush from a finishing merge here - just wait if ongoing... + engine.flush(false, true); assertNull(store.readLastCommittedSegmentsInfo().getUserData().get(Engine.SYNC_COMMIT_ID)); assertNull(engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID)); } @@ -1193,7 +1237,8 @@ public void testSyncedFlushSurvivesEngineRestart() throws IOException { store = createStore(); engine = createEngine(store, primaryTranslogDir, globalCheckpoint::get); final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20); - ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), new BytesArray("{}"), null); + ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), + new BytesArray("{}"), null); engine.index(indexForDoc(doc)); globalCheckpoint.set(0L); final Engine.CommitId commitID = engine.flush(); @@ -1221,7 +1266,8 @@ public void testSyncedFlushSurvivesEngineRestart() throws IOException { public void testSyncedFlushVanishesOnReplay() throws IOException { final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20); - ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), new BytesArray("{}"), null); + ParsedDocument doc = testParsedDocument("1", null, + testDocumentWithTextField(), new BytesArray("{}"), null); engine.index(indexForDoc(doc)); final Engine.CommitId commitID = engine.flush(); assertEquals("should succeed to flush commit with right id and no pending doc", engine.syncFlush(syncId, commitID), @@ -1236,7 +1282,8 @@ public void testSyncedFlushVanishesOnReplay() throws IOException { engine = new InternalEngine(config); engine.initializeMaxSeqNoOfUpdatesOrDeletes(); engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); - assertNull("Sync ID must be gone since we have a document to replay", engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID)); + assertNull("Sync ID must be gone since we have a document to replay", + engine.getLastCommittedSegmentInfos().getUserData().get(Engine.SYNC_COMMIT_ID)); } public void testVersioningNewCreate() throws IOException { @@ -1304,7 +1351,8 @@ public void testVersionedUpdate() throws IOException 
{ Engine.Index create = new Engine.Index(newUid(doc), primaryTerm.get(), doc, Versions.MATCH_DELETED); Engine.IndexResult indexResult = engine.index(create); assertThat(indexResult.getVersion(), equalTo(1L)); - try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), create.uid()), searcherFactory)) { + try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), create.uid()), + searcherFactory)) { assertEquals(1, get.version()); } @@ -1312,7 +1360,8 @@ public void testVersionedUpdate() throws IOException { Engine.IndexResult update_1_result = engine.index(update_1); assertThat(update_1_result.getVersion(), equalTo(2L)); - try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), create.uid()), searcherFactory)) { + try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), create.uid()), + searcherFactory)) { assertEquals(2, get.version()); } @@ -1320,7 +1369,8 @@ public void testVersionedUpdate() throws IOException { Engine.IndexResult update_2_result = engine.index(update_2); assertThat(update_2_result.getVersion(), equalTo(3L)); - try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), create.uid()), searcherFactory)) { + try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), create.uid()), + searcherFactory)) { assertEquals(3, get.version()); } @@ -1332,7 +1382,8 @@ public void testVersioningNewIndex() throws IOException { Engine.IndexResult indexResult = engine.index(index); assertThat(indexResult.getVersion(), equalTo(1L)); - index = new Engine.Index(newUid(doc), doc, indexResult.getSeqNo(), index.primaryTerm(), indexResult.getVersion(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false); + index = new Engine.Index(newUid(doc), doc, indexResult.getSeqNo(), index.primaryTerm(), indexResult.getVersion(), + index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, 0, -1, false); indexResult = replicaEngine.index(index); assertThat(indexResult.getVersion(), equalTo(1L)); } @@ -1362,7 +1413,8 @@ public void testForceMergeWithoutSoftDeletes() throws IOException { ParsedDocument doc = testParsedDocument(Integer.toString(0), null, testDocument(), B_1, null); Engine.Index index = indexForDoc(doc); engine.delete(new Engine.Delete(index.type(), index.id(), index.uid(), primaryTerm.get())); - engine.forceMerge(true, 10, true, false, false); //expunge deletes + //expunge deletes + engine.forceMerge(true, 10, true, false, false); engine.refresh("test"); assertEquals(engine.segments(true).size(), 1); @@ -1374,7 +1426,8 @@ public void testForceMergeWithoutSoftDeletes() throws IOException { doc = testParsedDocument(Integer.toString(1), null, testDocument(), B_1, null); index = indexForDoc(doc); engine.delete(new Engine.Delete(index.type(), index.id(), index.uid(), primaryTerm.get())); - engine.forceMerge(true, 10, false, false, false); //expunge deletes + //expunge deletes + engine.forceMerge(true, 10, false, false, false); engine.refresh("test"); assertEquals(engine.segments(true).size(), 1); try (Engine.Searcher test = engine.acquireSearcher("test")) { @@ -1396,7 +1449,8 @@ public void testForceMergeWithSoftDeletesRetention() throws Exception { final MapperService mapperService = createMapperService("test"); final Set liveDocs = new HashSet<>(); try (Store store = createStore(); - InternalEngine engine = createEngine(config(indexSettings, store, createTempDir(), 
newMergePolicy(), null, null, globalCheckpoint::get))) { + InternalEngine engine = createEngine(config(indexSettings, store, createTempDir(), newMergePolicy(), null, + null, globalCheckpoint::get))) { int numDocs = scaledRandomIntBetween(10, 100); for (int i = 0; i < numDocs; i++) { ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null); @@ -1470,12 +1524,14 @@ public void testForceMergeWithSoftDeletesRetentionAndRecoverySource() throws Exc final Set liveDocs = new HashSet<>(); final Set liveDocsWithSource = new HashSet<>(); try (Store store = createStore(); - InternalEngine engine = createEngine(config(indexSettings, store, createTempDir(), newMergePolicy(), null, null, + InternalEngine engine = createEngine(config(indexSettings, store, createTempDir(), newMergePolicy(), null, + null, globalCheckpoint::get))) { int numDocs = scaledRandomIntBetween(10, 100); for (int i = 0; i < numDocs; i++) { boolean useRecoverySource = randomBoolean() || omitSourceAllTheTime; - ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null, useRecoverySource); + ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null, + useRecoverySource); engine.index(indexForDoc(doc)); liveDocs.add(doc.id()); if (useRecoverySource == false) { @@ -1484,7 +1540,8 @@ public void testForceMergeWithSoftDeletesRetentionAndRecoverySource() throws Exc } for (int i = 0; i < numDocs; i++) { boolean useRecoverySource = randomBoolean() || omitSourceAllTheTime; - ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null, useRecoverySource); + ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null, + useRecoverySource); if (randomBoolean()) { engine.delete(new Engine.Delete(doc.type(), doc.id(), newUid(doc.id()), primaryTerm.get())); liveDocs.remove(doc.id()); @@ -1575,14 +1632,16 @@ public void run() { int numDocs = randomIntBetween(1, 20); for (int j = 0; j < numDocs; j++) { i++; - ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, null); + ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), B_1, + null); Engine.Index index = indexForDoc(doc); engine.index(index); } engine.refresh("test"); indexed.countDown(); try { - engine.forceMerge(randomBoolean(), 1, false, randomBoolean(), randomBoolean()); + engine.forceMerge(randomBoolean(), 1, false, randomBoolean(), + randomBoolean()); } catch (IOException e) { return; } @@ -1611,11 +1670,13 @@ public void run() { public void testVersioningCreateExistsException() throws IOException { ParsedDocument doc = testParsedDocument("1", null, testDocument(), B_1, null); - Engine.Index create = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false); + Engine.Index create = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, + Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false); Engine.IndexResult indexResult = engine.index(create); assertThat(indexResult.getVersion(), equalTo(1L)); - create = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, 0, -1, false); + create = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, + VersionType.INTERNAL, PRIMARY, 0, -1, false); indexResult = engine.index(create); 
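The version-conflict assertions that follow rely on "create" semantics: a create (roughly what passing Versions.MATCH_DELETED requests) must fail if a live document with the same id already exists. A self-contained toy sketch of that rule in plain Java, with hypothetical names and none of the engine's actual machinery:

import java.util.HashMap;
import java.util.Map;

// Toy versioned store: create() refuses to overwrite an existing id,
// index() bumps the version unconditionally.
final class VersionedCreateSketch {
    private final Map<String, Long> versions = new HashMap<>();

    long create(String id) {
        if (versions.containsKey(id)) {
            throw new IllegalStateException("version conflict: [" + id + "] already exists");
        }
        versions.put(id, 1L);
        return 1L;
    }

    long index(String id) {
        return versions.merge(id, 1L, (v, one) -> v + 1L);
    }
}

Under this rule a second create for the same id fails, which is the conflict the test expects to surface as a VersionConflictEngineException.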
assertThat(indexResult.getResultType(), equalTo(Engine.Result.Type.FAILURE)); assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class)); @@ -2010,7 +2071,8 @@ class OpAndVersion { throw new AssertionError(e); } for (int op = 0; op < opsPerThread; op++) { - try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), uidTerm), searcherFactory)) { + try (Engine.GetResult get = engine.get(new Engine.Get(true, false, + doc.type(), doc.id(), uidTerm), searcherFactory)) { FieldsVisitor visitor = new FieldsVisitor(true); get.docIdAndVersion().reader.document(get.docIdAndVersion().docId, visitor); List values = new ArrayList<>(Strings.commaDelimitedListToSet(visitor.source().utf8ToString())); @@ -2052,7 +2114,8 @@ class OpAndVersion { assertTrue(op.added + " should not exist", exists); } - try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), uidTerm), searcherFactory)) { + try (Engine.GetResult get = engine.get(new Engine.Get(true, false, + doc.type(), doc.id(), uidTerm), searcherFactory)) { FieldsVisitor visitor = new FieldsVisitor(true); get.docIdAndVersion().reader.document(get.docIdAndVersion().docId, visitor); List values = Arrays.asList(Strings.commaDelimitedListToStringArray(visitor.source().utf8ToString())); @@ -2083,7 +2146,8 @@ private static class MockAppender extends AbstractAppender { public boolean sawIndexWriterIFDMessage; MockAppender(final String name) throws IllegalAccessException { - super(name, RegexFilter.createFilter(".*(\n.*)*", new String[0], false, null, null), null); + super(name, RegexFilter.createFilter(".*(\n.*)*", new String[0], + false, null, null), null); } @Override @@ -2150,9 +2214,11 @@ public void testSeqNoAndCheckpoints() throws IOException { try { initialEngine = createEngine(defaultSettings, store, createTempDir(), newLogMergePolicy(), null); - final ShardRouting primary = TestShardRouting.newShardRouting("test", shardId.id(), "node1", null, true, + final ShardRouting primary = TestShardRouting.newShardRouting("test", + shardId.id(), "node1", null, true, ShardRoutingState.STARTED, allocationId); - final ShardRouting replica = TestShardRouting.newShardRouting(shardId, "node2", false, ShardRoutingState.STARTED); + final ShardRouting replica = + TestShardRouting.newShardRouting(shardId, "node2", false, ShardRoutingState.STARTED); ReplicationTracker gcpTracker = (ReplicationTracker) initialEngine.config().getGlobalCheckpointSupplier(); gcpTracker.updateFromMaster(1L, new HashSet<>(Arrays.asList(primary.allocationId().getId(), replica.allocationId().getId())), @@ -2382,7 +2448,8 @@ private static FixedBitSet getSeqNosSet(final IndexReader reader, final long hig throw new AssertionError("Document does not have a seq number: " + docID); } final long seqNo = values.longValue(); - assertFalse("should not have more than one document with the same seq_no[" + seqNo + "]", bitSet.get((int) seqNo)); + assertFalse("should not have more than one document with the same seq_no[" + + seqNo + "]", bitSet.get((int) seqNo)); bitSet.set((int) seqNo); } } @@ -2435,10 +2502,13 @@ public void testEnableGcDeletes() throws Exception { document.add(new TextField("value", "test1", Field.Store.YES)); ParsedDocument doc = testParsedDocument("1", null, document, B_2, null); - engine.index(new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 1, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false)); + engine.index(new Engine.Index(newUid(doc), doc, 
SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 1, + VersionType.EXTERNAL, + Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false)); // Delete document we just added: - engine.delete(new Engine.Delete("test", "1", newUid(doc), SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 10, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime())); + engine.delete(new Engine.Delete("test", "1", newUid(doc), SequenceNumbers.UNASSIGNED_SEQ_NO, 0, + 10, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime())); // Get should not find the document Engine.GetResult getResult = engine.get(newGet(true, doc), searcherFactory); @@ -2452,14 +2522,17 @@ public void testEnableGcDeletes() throws Exception { } // Delete non-existent document - engine.delete(new Engine.Delete("test", "2", newUid("2"), SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 10, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime())); + engine.delete(new Engine.Delete("test", "2", newUid("2"), SequenceNumbers.UNASSIGNED_SEQ_NO, 0, + 10, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime())); // Get should not find the document (we never indexed uid=2): - getResult = engine.get(new Engine.Get(true, false, "type", "2", newUid("2")), searcherFactory); + getResult = engine.get(new Engine.Get(true, false, "type", "2", newUid("2")), + searcherFactory); assertThat(getResult.exists(), equalTo(false)); // Try to index uid=1 with a too-old version, should fail: - Engine.Index index = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 2, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false); + Engine.Index index = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 2, + VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false); Engine.IndexResult indexResult = engine.index(index); assertThat(indexResult.getResultType(), equalTo(Engine.Result.Type.FAILURE)); assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class)); @@ -2469,7 +2542,8 @@ public void testEnableGcDeletes() throws Exception { assertThat(getResult.exists(), equalTo(false)); // Try to index uid=2 with a too-old version, should fail: - Engine.Index index1 = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 2, VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false); + Engine.Index index1 = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 2, + VersionType.EXTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), -1, false); indexResult = engine.index(index1); assertThat(indexResult.getResultType(), equalTo(Engine.Result.Type.FAILURE)); assertThat(indexResult.getFailure(), instanceOf(VersionConflictEngineException.class)); @@ -2543,15 +2617,18 @@ public void testSettings() { public void testCurrentTranslogIDisCommitted() throws IOException { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); try (Store store = createStore()) { - EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get); + EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, + globalCheckpoint::get); // create { store.createEmpty(); final String translogUUID = - Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); + 
Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), + SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); store.associateIndexWithNewTranslog(translogUUID); - ParsedDocument doc = testParsedDocument(Integer.toString(0), null, testDocument(), new BytesArray("{}"), null); + ParsedDocument doc = testParsedDocument(Integer.toString(0), null, testDocument(), + new BytesArray("{}"), null); Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); @@ -2591,7 +2668,8 @@ public void testCurrentTranslogIDisCommitted() throws IOException { // open index with new tlog { final String translogUUID = - Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); + Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), + SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); store.associateIndexWithNewTranslog(translogUUID); trimUnsafeCommits(config); try (InternalEngine engine = new InternalEngine(config)) { @@ -2616,7 +2694,8 @@ public void testCurrentTranslogIDisCommitted() throws IOException { engine.initializeMaxSeqNoOfUpdatesOrDeletes(); engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); userData = engine.getLastCommittedSegmentInfos().getUserData(); - assertEquals("no changes - nothing to commit", "1", userData.get(Translog.TRANSLOG_GENERATION_KEY)); + assertEquals("no changes - nothing to commit", "1", + userData.get(Translog.TRANSLOG_GENERATION_KEY)); assertEquals(engine.getTranslog().getTranslogUUID(), userData.get(Translog.TRANSLOG_UUID_KEY)); } } @@ -2640,7 +2719,8 @@ public void testMissingTranslog() throws IOException { // expected } // when a new translog is created it should be ok - final String translogUUID = Translog.createEmptyTranslog(primaryTranslogDir, SequenceNumbers.UNASSIGNED_SEQ_NO, shardId, primaryTerm); + final String translogUUID = Translog.createEmptyTranslog(primaryTranslogDir, + SequenceNumbers.UNASSIGNED_SEQ_NO, shardId, primaryTerm); store.associateIndexWithNewTranslog(translogUUID); EngineConfig config = config(defaultSettings, store, primaryTranslogDir, newMergePolicy(), null); engine = new InternalEngine(config); @@ -2653,7 +2733,8 @@ public void testTranslogReplayWithFailure() throws IOException { final int numDocs = randomIntBetween(1, 10); try (InternalEngine engine = createEngine(store, translogPath)) { for (int i = 0; i < numDocs; i++) { - ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null); + ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), + new BytesArray("{}"), null); Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); Engine.IndexResult indexResult = engine.index(firstIndexRequest); @@ -2722,7 +2803,8 @@ protected void commitIndexWriter(IndexWriter writer, Translog translog, String s }) { engine.initializeMaxSeqNoOfUpdatesOrDeletes(); engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); - final ParsedDocument doc1 = testParsedDocument("1", null, testDocumentWithTextField(), SOURCE, null); + final ParsedDocument doc1 = testParsedDocument("1", null, + testDocumentWithTextField(), SOURCE, null); engine.index(indexForDoc(doc1)); 
globalCheckpoint.set(engine.getLocalCheckpoint()); throwErrorOnCommit.set(true); @@ -2748,8 +2830,10 @@ protected void commitIndexWriter(IndexWriter writer, Translog translog, String s public void testSkipTranslogReplay() throws IOException { final int numDocs = randomIntBetween(1, 10); for (int i = 0; i < numDocs; i++) { - ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null); - Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); + ParsedDocument doc = testParsedDocument(Integer.toString(i), null, + testDocument(), new BytesArray("{}"), null); + Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, + SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); Engine.IndexResult indexResult = engine.index(firstIndexRequest); assertThat(indexResult.getVersion(), equalTo(1L)); } @@ -2788,8 +2872,11 @@ public void testTranslogReplay() throws IOException { final LongSupplier inSyncGlobalCheckpointSupplier = () -> this.engine.getLocalCheckpoint(); final int numDocs = randomIntBetween(1, 10); for (int i = 0; i < numDocs; i++) { - ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null); - Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); + ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), + new BytesArray("{}"), null); + Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, + SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, + System.nanoTime(), -1, false); Engine.IndexResult indexResult = engine.index(firstIndexRequest); assertThat(indexResult.getVersion(), equalTo(1L)); } @@ -2799,7 +2886,8 @@ public void testTranslogReplay() throws IOException { engine.close(); trimUnsafeCommits(copy(engine.config(), inSyncGlobalCheckpointSupplier)); - engine = new InternalEngine(copy(engine.config(), inSyncGlobalCheckpointSupplier)); // we need to reuse the engine config unless the parser.mappingModified won't work + // we need to reuse the engine config unless the parser.mappingModified won't work + engine = new InternalEngine(copy(engine.config(), inSyncGlobalCheckpointSupplier)); engine.initializeMaxSeqNoOfUpdatesOrDeletes(); engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); @@ -2820,8 +2908,10 @@ public void testTranslogReplay() throws IOException { final boolean flush = randomBoolean(); int randomId = randomIntBetween(numDocs + 1, numDocs + 10); - ParsedDocument doc = testParsedDocument(Integer.toString(randomId), null, testDocument(), new BytesArray("{}"), null); - Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 1, VersionType.EXTERNAL, PRIMARY, System.nanoTime(), -1, false); + ParsedDocument doc = + testParsedDocument(Integer.toString(randomId), null, testDocument(), new BytesArray("{}"), null); + Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, + 1, VersionType.EXTERNAL, PRIMARY, System.nanoTime(), -1, false); Engine.IndexResult indexResult = engine.index(firstIndexRequest); assertThat(indexResult.getVersion(), equalTo(1L)); 
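The translog-replay tests below rest on a simple property: replaying the logged operations after a restart must reproduce the documents that were indexed before it. A self-contained toy sketch of that property in plain Java, with hypothetical names and nothing from the Translog API:

import java.util.LinkedHashMap;
import java.util.Map;

// Conceptual sketch only: each operation is recorded in a log before being
// applied; after a simulated restart, replaying the log in order rebuilds
// the same documents.
final class TranslogReplaySketch {
    public static void main(String[] args) {
        Map<String, String> translog = new LinkedHashMap<>();   // op id -> source, in append order
        Map<String, String> liveIndex = new LinkedHashMap<>();

        for (int i = 0; i < 3; i++) {
            String id = Integer.toString(i);
            translog.put(id, "{}");      // record the operation first
            liveIndex.put(id, "{}");     // then apply it to the "index"
        }

        // restart: rebuild state purely by replaying the log
        Map<String, String> recovered = new LinkedHashMap<>(translog);
        if (!recovered.equals(liveIndex)) {
            throw new AssertionError("replay must reproduce the indexed documents");
        }
    }
}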
if (flush) { @@ -2830,7 +2920,8 @@ public void testTranslogReplay() throws IOException { } doc = testParsedDocument(Integer.toString(randomId), null, testDocument(), new BytesArray("{}"), null); - Engine.Index idxRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 2, VersionType.EXTERNAL, PRIMARY, System.nanoTime(), -1, false); + Engine.Index idxRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, 2, + VersionType.EXTERNAL, PRIMARY, System.nanoTime(), -1, false); Engine.IndexResult result = engine.index(idxRequest); engine.refresh("test"); assertThat(result.getVersion(), equalTo(2L)); @@ -2863,8 +2954,10 @@ public void testTranslogReplay() throws IOException { public void testRecoverFromForeignTranslog() throws IOException { final int numDocs = randomIntBetween(1, 10); for (int i = 0; i < numDocs; i++) { - ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null); - Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); + ParsedDocument doc = + testParsedDocument(Integer.toString(i), null, testDocument(), new BytesArray("{}"), null); + Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, + Versions.MATCH_DELETED, VersionType.INTERNAL, PRIMARY, System.nanoTime(), -1, false); Engine.IndexResult index = engine.index(firstIndexRequest); assertThat(index.getVersion(), equalTo(1L)); } @@ -2877,7 +2970,8 @@ public void testRecoverFromForeignTranslog() throws IOException { Translog translog = new Translog( new TranslogConfig(shardId, badTranslogLog, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE), badUUID, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); - translog.add(new Translog.Index("test", "SomeBogusId", 0, primaryTerm.get(), "{}".getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "SomeBogusId", 0, primaryTerm.get(), + "{}".getBytes(Charset.forName("UTF-8")))); assertEquals(generation.translogFileGeneration, translog.currentFileGeneration()); translog.close(); @@ -2914,7 +3008,8 @@ public void run() { try { switch (operation) { case "optimize": { - engine.forceMerge(true, 1, false, false, false); + engine.forceMerge(true, 1, false, false, + false); break; } case "refresh": { @@ -2937,7 +3032,8 @@ public void run() { engine.close(); mergeThread.join(); logger.info("exception caught: ", exception.get()); - assertTrue("expected an Exception that signals shard is not available", TransportActions.isShardNotAvailableException(exception.get())); + assertTrue("expected an Exception that signals shard is not available", + TransportActions.isShardNotAvailableException(exception.get())); } /** @@ -3129,7 +3225,8 @@ public BytesRef binaryValue() { } public void testDoubleDeliveryPrimary() throws IOException { - final ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null); + final ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), + new BytesArray("{}".getBytes(Charset.defaultCharset())), null); Engine.Index operation = appendOnlyPrimary(doc, false, 1); Engine.Index retry = appendOnlyPrimary(doc, true, 1); if (randomBoolean()) { @@ -3188,8 +3285,8 @@ public void 
testDoubleDeliveryReplicaAppendingAndDeleteOnly() throws IOException Engine.Index operation = appendOnlyReplica(doc, false, 1, randomIntBetween(0, 5)); Engine.Index retry = appendOnlyReplica(doc, true, 1, randomIntBetween(0, 5)); Engine.Delete delete = new Engine.Delete(operation.type(), operation.id(), operation.uid(), - Math.max(retry.seqNo(), operation.seqNo())+1, operation.primaryTerm(), operation.version()+1, operation.versionType(), - REPLICA, operation.startTime()+1); + Math.max(retry.seqNo(), operation.seqNo())+1, operation.primaryTerm(), operation.version()+1, + operation.versionType(), REPLICA, operation.startTime()+1); // operations with a seq# equal or lower to the local checkpoint are not indexed to lucene // and the version lookup is skipped final boolean belowLckp = operation.seqNo() == 0 && retry.seqNo() == 0; @@ -3341,20 +3438,24 @@ public void testDoubleDeliveryReplica() throws IOException { public void testRetryWithAutogeneratedIdWorksAndNoDuplicateDocs() throws IOException { - final ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null); + final ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), + new BytesArray("{}".getBytes(Charset.defaultCharset())), null); boolean isRetry = false; long autoGeneratedIdTimestamp = 0; - Engine.Index index = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); + Engine.Index index = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, + Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); Engine.IndexResult indexResult = engine.index(index); assertThat(indexResult.getVersion(), equalTo(1L)); - index = new Engine.Index(newUid(doc), doc, indexResult.getSeqNo(), index.primaryTerm(), indexResult.getVersion(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); + index = new Engine.Index(newUid(doc), doc, indexResult.getSeqNo(), index.primaryTerm(), indexResult.getVersion(), + index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); indexResult = replicaEngine.index(index); assertThat(indexResult.getVersion(), equalTo(1L)); isRetry = true; - index = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); + index = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, + VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); indexResult = engine.index(index); assertThat(indexResult.getVersion(), equalTo(1L)); engine.refresh("test"); @@ -3363,7 +3464,8 @@ public void testRetryWithAutogeneratedIdWorksAndNoDuplicateDocs() throws IOExcep assertEquals(1, topDocs.totalHits); } - index = new Engine.Index(newUid(doc), doc, indexResult.getSeqNo(), index.primaryTerm(), indexResult.getVersion(), index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); + index = new Engine.Index(newUid(doc), doc, indexResult.getSeqNo(), index.primaryTerm(), indexResult.getVersion(), + index.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), 
autoGeneratedIdTimestamp, isRetry); indexResult = replicaEngine.index(index); assertThat(indexResult.getResultType(), equalTo(Engine.Result.Type.SUCCESS)); replicaEngine.refresh("test"); @@ -3375,20 +3477,25 @@ public void testRetryWithAutogeneratedIdWorksAndNoDuplicateDocs() throws IOExcep public void testRetryWithAutogeneratedIdsAndWrongOrderWorksAndNoDuplicateDocs() throws IOException { - final ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null); + final ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), + new BytesArray("{}".getBytes(Charset.defaultCharset())), null); boolean isRetry = true; long autoGeneratedIdTimestamp = 0; - Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); + Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, + 0, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); Engine.IndexResult result = engine.index(firstIndexRequest); assertThat(result.getVersion(), equalTo(1L)); - Engine.Index firstIndexRequestReplica = new Engine.Index(newUid(doc), doc, result.getSeqNo(), firstIndexRequest.primaryTerm(), result.getVersion(), firstIndexRequest.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); + Engine.Index firstIndexRequestReplica = new Engine.Index(newUid(doc), doc, result.getSeqNo(), + firstIndexRequest.primaryTerm(), result.getVersion(), firstIndexRequest.versionType().versionTypeForReplicationAndRecovery(), + REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); Engine.IndexResult indexReplicaResult = replicaEngine.index(firstIndexRequestReplica); assertThat(indexReplicaResult.getVersion(), equalTo(1L)); isRetry = false; - Engine.Index secondIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); + Engine.Index secondIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, + 0, Versions.MATCH_ANY, VersionType.INTERNAL, PRIMARY, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); Engine.IndexResult indexResult = engine.index(secondIndexRequest); assertTrue(indexResult.isCreated()); engine.refresh("test"); @@ -3397,7 +3504,9 @@ public void testRetryWithAutogeneratedIdsAndWrongOrderWorksAndNoDuplicateDocs() assertEquals(1, topDocs.totalHits); } - Engine.Index secondIndexRequestReplica = new Engine.Index(newUid(doc), doc, result.getSeqNo(), secondIndexRequest.primaryTerm(), result.getVersion(), firstIndexRequest.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), autoGeneratedIdTimestamp, isRetry); + Engine.Index secondIndexRequestReplica = new Engine.Index(newUid(doc), doc, result.getSeqNo(), secondIndexRequest.primaryTerm(), + result.getVersion(), firstIndexRequest.versionType().versionTypeForReplicationAndRecovery(), REPLICA, System.nanoTime(), + autoGeneratedIdTimestamp, isRetry); replicaEngine.index(secondIndexRequestReplica); replicaEngine.refresh("test"); try (Engine.Searcher searcher = replicaEngine.acquireSearcher("test")) { @@ -3430,7 +3539,8 @@ public void testRetryConcurrently() throws InterruptedException, 
IOException { List docs = new ArrayList<>(); final boolean primary = randomBoolean(); for (int i = 0; i < numDocs; i++) { - final ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null); + final ParsedDocument doc = testParsedDocument(Integer.toString(i), null, + testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null); final Engine.Index originalIndex; final Engine.Index retryIndex; if (primary) { @@ -3500,9 +3610,11 @@ public void testEngineMaxTimestampIsInitialized() throws IOException { final long timestamp2 = randomNonNegativeLong(); final long maxTimestamp12 = Math.max(timestamp1, timestamp2); final Function configSupplier = - store -> config(defaultSettings, store, translogDir, NoMergePolicy.INSTANCE, null, null, globalCheckpoint::get); + store -> config(defaultSettings, store, translogDir, + NoMergePolicy.INSTANCE, null, null, globalCheckpoint::get); try (Store store = createStore(newFSDirectory(storeDir)); Engine engine = createEngine(configSupplier.apply(store))) { - assertEquals(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp()); + assertEquals(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, + engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp()); final ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null); engine.index(appendOnlyPrimary(doc, true, timestamp1)); @@ -3510,7 +3622,8 @@ public void testEngineMaxTimestampIsInitialized() throws IOException { } try (Store store = createStore(newFSDirectory(storeDir)); InternalEngine engine = new InternalEngine(configSupplier.apply(store))) { - assertEquals(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp()); + assertEquals(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, + engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp()); engine.initializeMaxSeqNoOfUpdatesOrDeletes(); engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); assertEquals(timestamp1, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp()); @@ -3523,7 +3636,8 @@ public void testEngineMaxTimestampIsInitialized() throws IOException { } try (Store store = createStore(newFSDirectory(storeDir))) { if (randomBoolean() || true) { - final String translogUUID = Translog.createEmptyTranslog(translogDir, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); + final String translogUUID = Translog.createEmptyTranslog(translogDir, + SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); store.associateIndexWithNewTranslog(translogUUID); } try (Engine engine = new InternalEngine(configSupplier.apply(store))) { @@ -3540,7 +3654,8 @@ public void testAppendConcurrently() throws InterruptedException, IOException { boolean primary = randomBoolean(); List docs = new ArrayList<>(); for (int i = 0; i < numDocs; i++) { - final ParsedDocument doc = testParsedDocument(Integer.toString(i), null, testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null); + final ParsedDocument doc = testParsedDocument(Integer.toString(i), null, + testDocumentWithTextField(), new BytesArray("{}".getBytes(Charset.defaultCharset())), null); Engine.Index index = primary ? 
appendOnlyPrimary(doc, false, i) : appendOnlyReplica(doc, false, i, i); docs.add(index); } @@ -3664,7 +3779,8 @@ public void afterRefresh(boolean didRefresh) throws IOException { } public void testSequenceIDs() throws Exception { - Tuple seqID = getSequenceID(engine, new Engine.Get(false, false, "type", "2", newUid("1"))); + Tuple seqID = getSequenceID(engine, new Engine.Get(false, false, + "type", "2", newUid("1"))); // Non-existent doc returns no seqnum and no primary term assertThat(seqID.v1(), equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO)); assertThat(seqID.v2(), equalTo(0L)); @@ -3710,7 +3826,8 @@ public void testSequenceIDs() throws Exception { // we can query by the _seq_no Engine.Searcher searchResult = engine.acquireSearcher("test"); MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(1)); - MatcherAssert.assertThat(searchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(LongPoint.newExactQuery("_seq_no", 2), 1)); + MatcherAssert.assertThat(searchResult, + EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(LongPoint.newExactQuery("_seq_no", 2), 1)); searchResult.close(); } @@ -3752,7 +3869,8 @@ public void testLookupSeqNoByIdInLucene() throws Exception { String msg = "latestOps=" + latestOps + " op=" + id; DocIdAndSeqNo docIdAndSeqNo = VersionsAndSeqNoResolver.loadDocIdAndSeqNo(searcher.reader(), newUid(id)); assertThat(msg, docIdAndSeqNo.seqNo, equalTo(latestOps.get(id).seqNo())); - assertThat(msg, docIdAndSeqNo.isLive, equalTo(latestOps.get(id).operationType() == Engine.Operation.TYPE.INDEX)); + assertThat(msg, docIdAndSeqNo.isLive, + equalTo(latestOps.get(id).operationType() == Engine.Operation.TYPE.INDEX)); } assertThat(VersionsAndSeqNoResolver.loadDocIdAndVersion( searcher.reader(), newUid("any-" + between(1, 10))), nullValue()); @@ -3836,7 +3954,9 @@ public void testSequenceNumberAdvancesToMaxSeqOnEngineOpenOnPrimary() throws Bro final AtomicLong expectedLocalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); final List threads = new ArrayList<>(); initialEngine = - createEngine(defaultSettings, store, primaryTranslogDir, newMergePolicy(), null, LocalCheckpointTracker::new, null, getStallingSeqNoGenerator(latchReference, barrier, stall, expectedLocalCheckpoint)); + createEngine(defaultSettings, store, primaryTranslogDir, + newMergePolicy(), null, LocalCheckpointTracker::new, null, + getStallingSeqNoGenerator(latchReference, barrier, stall, expectedLocalCheckpoint)); final InternalEngine finalInitialEngine = initialEngine; for (int i = 0; i < docs; i++) { final String id = Integer.toString(i); @@ -3956,7 +4076,8 @@ public void testOutOfOrderSequenceNumbersWithVersionConflict() throws IOExceptio } assertThat(engine.getLocalCheckpoint(), equalTo(expectedLocalCheckpoint)); - try (Engine.GetResult result = engine.get(new Engine.Get(true, false, "type", "2", uid), searcherFactory)) { + try (Engine.GetResult result = engine.get(new Engine.Get(true, false, + "type", "2", uid), searcherFactory)) { assertThat(result.exists(), equalTo(exists)); } } @@ -3991,7 +4112,8 @@ protected long doGenerateSeqNoForOperation(Operation operation) { assertThat(noOpEngine.getLocalCheckpoint(), equalTo((long) (maxSeqNo + 1))); assertThat(noOpEngine.getTranslog().stats().getUncommittedOperations(), equalTo(gapsFilled)); noOpEngine.noOp( - new Engine.NoOp(maxSeqNo + 2, primaryTerm.get(), randomFrom(PRIMARY, REPLICA, PEER_RECOVERY), System.nanoTime(), reason)); + new Engine.NoOp(maxSeqNo + 2, primaryTerm.get(), + randomFrom(PRIMARY, 
REPLICA, PEER_RECOVERY), System.nanoTime(), reason)); assertThat(noOpEngine.getLocalCheckpoint(), equalTo((long) (maxSeqNo + 2))); assertThat(noOpEngine.getTranslog().stats().getUncommittedOperations(), equalTo(gapsFilled + 1)); // skip to the op that we added to the translog @@ -4013,7 +4135,8 @@ protected long doGenerateSeqNoForOperation(Operation operation) { List operationsFromLucene = readAllOperationsInLucene(noOpEngine, mapperService); assertThat(operationsFromLucene, hasSize(maxSeqNo + 2 - localCheckpoint)); // fills n gap and 2 manual noop. for (int i = 0; i < operationsFromLucene.size(); i++) { - assertThat(operationsFromLucene.get(i), equalTo(new Translog.NoOp(localCheckpoint + 1 + i, primaryTerm.get(), "filling gaps"))); + assertThat(operationsFromLucene.get(i), + equalTo(new Translog.NoOp(localCheckpoint + 1 + i, primaryTerm.get(), "filling gaps"))); } assertConsistentHistoryBetweenTranslogAndLuceneIndex(noOpEngine, mapperService); } @@ -4090,7 +4213,9 @@ public void testMinGenerationForSeqNo() throws IOException, BrokenBarrierExcepti final AtomicLong expectedLocalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); final Map threads = new LinkedHashMap<>(); actualEngine = - createEngine(defaultSettings, store, primaryTranslogDir, newMergePolicy(), null, LocalCheckpointTracker::new, null, getStallingSeqNoGenerator(latchReference, barrier, stall, expectedLocalCheckpoint)); + createEngine(defaultSettings, store, primaryTranslogDir, + newMergePolicy(), null, LocalCheckpointTracker::new, null, + getStallingSeqNoGenerator(latchReference, barrier, stall, expectedLocalCheckpoint)); final InternalEngine finalActualEngine = actualEngine; final Translog translog = finalActualEngine.getTranslog(); final long generation = finalActualEngine.getTranslog().currentFileGeneration(); @@ -4198,7 +4323,8 @@ public void testRestoreLocalHistoryFromTranslog() throws IOException { engineConfig = engine.config(); for (final long seqNo : seqNos) { final String id = Long.toString(seqNo); - final ParsedDocument doc = testParsedDocument(id, null, testDocumentWithTextField(), SOURCE, null); + final ParsedDocument doc = testParsedDocument(id, null, + testDocumentWithTextField(), SOURCE, null); engine.index(replicaIndexForDoc(doc, 1, seqNo, false)); if (rarely()) { engine.rollTranslogGeneration(); @@ -4408,7 +4534,8 @@ public void testSeqNoGenerator() throws IOException { SequenceNumbers.NO_OPS_PERFORMED, SequenceNumbers.NO_OPS_PERFORMED); final AtomicLong seqNoGenerator = new AtomicLong(seqNo); - try (Engine e = createEngine(defaultSettings, store, primaryTranslogDir, newMergePolicy(), null, localCheckpointTrackerSupplier, + try (Engine e = createEngine(defaultSettings, store, primaryTranslogDir, + newMergePolicy(), null, localCheckpointTrackerSupplier, null, (engine, operation) -> seqNoGenerator.getAndIncrement())) { final String id = "id"; final Field uidField = new Field("_id", id, IdFieldMapper.Defaults.FIELD_TYPE); @@ -4481,8 +4608,8 @@ public void testKeepTranslogAfterGlobalCheckpoint() throws Exception { final String translogUUID = Translog.createEmptyTranslog(translogPath, globalCheckpoint.get(), shardId, primaryTerm.get()); store.associateIndexWithNewTranslog(translogUUID); - final EngineConfig engineConfig = config(indexSettings, store, translogPath, NoMergePolicy.INSTANCE, null, null, - () -> globalCheckpoint.get()); + final EngineConfig engineConfig = config(indexSettings, store, translogPath, + NoMergePolicy.INSTANCE, null, null, () -> globalCheckpoint.get()); try (InternalEngine 
engine = new InternalEngine(engineConfig) { @Override protected void commitIndexWriter(IndexWriter writer, Translog translog, String syncId) throws IOException { @@ -4630,7 +4757,8 @@ public void testCleanUpCommitsWhenGlobalCheckpointAdvanced() throws Exception { final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED); try (Store store = createStore(); InternalEngine engine = - createEngine(config(indexSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get))) { + createEngine(config(indexSettings, store, createTempDir(), newMergePolicy(), + null, null, globalCheckpoint::get))) { final int numDocs = scaledRandomIntBetween(10, 100); for (int docId = 0; docId < numDocs; docId++) { index(engine, docId); @@ -4687,10 +4815,12 @@ public void testShouldPeriodicallyFlush() throws Exception { assertThat("Empty engine does not need flushing", engine.shouldPeriodicallyFlush(), equalTo(false)); // A new engine may have more than one empty translog files - the test should account this extra. final Translog translog = engine.getTranslog(); - final long extraTranslogSizeInNewEngine = engine.getTranslog().stats().getUncommittedSizeInBytes() - Translog.DEFAULT_HEADER_SIZE_IN_BYTES; + final long extraTranslogSizeInNewEngine = + engine.getTranslog().stats().getUncommittedSizeInBytes() - Translog.DEFAULT_HEADER_SIZE_IN_BYTES; int numDocs = between(10, 100); for (int id = 0; id < numDocs; id++) { - final ParsedDocument doc = testParsedDocument(Integer.toString(id), null, testDocumentWithTextField(), SOURCE, null); + final ParsedDocument doc = + testParsedDocument(Integer.toString(id), null, testDocumentWithTextField(), SOURCE, null); engine.index(indexForDoc(doc)); } assertThat("Not exceeded translog flush threshold yet", engine.shouldPeriodicallyFlush(), equalTo(false)); @@ -4708,7 +4838,8 @@ public void testShouldPeriodicallyFlush() throws Exception { assertThat(engine.getTranslog().stats().getUncommittedOperations(), equalTo(0)); // Stale operations skipped by Lucene but added to translog - still able to flush for (int id = 0; id < numDocs; id++) { - final ParsedDocument doc = testParsedDocument(Integer.toString(id), null, testDocumentWithTextField(), SOURCE, null); + final ParsedDocument doc = + testParsedDocument(Integer.toString(id), null, testDocumentWithTextField(), SOURCE, null); final Engine.IndexResult result = engine.index(replicaIndexForDoc(doc, 1L, id, false)); assertThat(result.isCreated(), equalTo(false)); } @@ -4725,7 +4856,8 @@ public void testShouldPeriodicallyFlush() throws Exception { if (randomBoolean()) { translog.rollGeneration(); } - final ParsedDocument doc = testParsedDocument("new" + id, null, testDocumentWithTextField(), SOURCE, null); + final ParsedDocument doc = + testParsedDocument("new" + id, null, testDocumentWithTextField(), SOURCE, null); engine.index(replicaIndexForDoc(doc, 2L, generateNewSeqNo(engine), false)); if (engine.shouldPeriodicallyFlush()) { engine.flush(); @@ -4749,7 +4881,8 @@ public void testStressShouldPeriodicallyFlush() throws Exception { for (int i = 0; i < numOps; i++) { final long localCheckPoint = engine.getLocalCheckpoint(); final long seqno = randomLongBetween(Math.max(0, localCheckPoint), localCheckPoint + 5); - final ParsedDocument doc = testParsedDocument(Long.toString(seqno), null, testDocumentWithTextField(), SOURCE, null); + final ParsedDocument doc = + testParsedDocument(Long.toString(seqno), null, testDocumentWithTextField(), SOURCE, null); engine.index(replicaIndexForDoc(doc, 1L, 
seqno, false)); if (rarely() && engine.getTranslog().shouldRollGeneration()) { engine.rollTranslogGeneration(); @@ -4772,7 +4905,8 @@ public void testStressUpdateSameDocWhileGettingIt() throws IOException, Interrup .put(IndexSettings.INDEX_GC_DELETES_SETTING.getKey(), TimeValue.timeValueMillis(1))).build(); engine.engineConfig.getIndexSettings().updateIndexMetaData(indexMetaData); engine.onSettingsChanged(); - ParsedDocument document = testParsedDocument(Integer.toString(0), null, testDocumentWithTextField(), SOURCE, null); + ParsedDocument document = + testParsedDocument(Integer.toString(0), null, testDocumentWithTextField(), SOURCE, null); final Engine.Index doc = new Engine.Index(newUid(document), document, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), 0, false); // first index an append only document and then delete it. such that we have it in the tombstones @@ -4780,15 +4914,18 @@ public void testStressUpdateSameDocWhileGettingIt() throws IOException, Interrup engine.delete(new Engine.Delete(doc.type(), doc.id(), doc.uid(), primaryTerm.get())); // now index more append only docs and refresh so we re-enabel the optimization for unsafe version map - ParsedDocument document1 = testParsedDocument(Integer.toString(1), null, testDocumentWithTextField(), SOURCE, null); + ParsedDocument document1 = + testParsedDocument(Integer.toString(1), null, testDocumentWithTextField(), SOURCE, null); engine.index(new Engine.Index(newUid(document1), document1, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), 0, false)); engine.refresh("test"); - ParsedDocument document2 = testParsedDocument(Integer.toString(2), null, testDocumentWithTextField(), SOURCE, null); + ParsedDocument document2 = + testParsedDocument(Integer.toString(2), null, testDocumentWithTextField(), SOURCE, null); engine.index(new Engine.Index(newUid(document2), document2, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), 0, false)); engine.refresh("test"); - ParsedDocument document3 = testParsedDocument(Integer.toString(3), null, testDocumentWithTextField(), SOURCE, null); + ParsedDocument document3 = + testParsedDocument(Integer.toString(3), null, testDocumentWithTextField(), SOURCE, null); final Engine.Index doc3 = new Engine.Index(newUid(document3), document3, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), 0, false); engine.index(doc3); @@ -4800,14 +4937,16 @@ public void testStressUpdateSameDocWhileGettingIt() throws IOException, Interrup CountDownLatch awaitStarted = new CountDownLatch(1); Thread thread = new Thread(() -> { awaitStarted.countDown(); - try (Engine.GetResult getResult = engine.get(new Engine.Get(true, false, doc3.type(), doc3.id(), doc3.uid()), + try (Engine.GetResult getResult = engine.get(new Engine.Get(true, false, doc3.type(), + doc3.id(), doc3.uid()), engine::acquireSearcher)) { assertTrue(getResult.exists()); } }); thread.start(); awaitStarted.await(); - try (Engine.GetResult getResult = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), doc.uid()), + try (Engine.GetResult getResult = engine.get(new Engine.Get(true, false, doc.type(), + doc.id(), doc.uid()), engine::acquireSearcher)) { assertFalse(getResult.exists()); } @@ -4859,9 +4998,11 @@ public void 
testPruneOnlyDeletesAtMostLocalCheckpoint() throws Exception { // Fill the seqno gap - should prune all tombstones. clock.set(between(0, 100)); if (randomBoolean()) { - engine.index(replicaIndexForDoc(testParsedDocument("d", null, testDocumentWithTextField(), SOURCE, null), 1, gapSeqNo, false)); + engine.index(replicaIndexForDoc(testParsedDocument("d", null, testDocumentWithTextField(), + SOURCE, null), 1, gapSeqNo, false)); } else { - engine.delete(replicaDeleteForDoc(UUIDs.randomBase64UUID(), Versions.MATCH_ANY, gapSeqNo, threadPool.relativeTimeInMillis())); + engine.delete(replicaDeleteForDoc(UUIDs.randomBase64UUID(), Versions.MATCH_ANY, + gapSeqNo, threadPool.relativeTimeInMillis())); } clock.set(randomLongBetween(100 + gcInterval * 4/3, Long.MAX_VALUE)); // Need a margin for gcInterval/4. engine.refresh("test"); @@ -4881,7 +5022,8 @@ public void testTrackMaxSeqNoOfNonAppendOnlyOperations() throws Exception { latch.countDown(); final int numDocs = scaledRandomIntBetween(100, 1000); for (int i = 0; i < numDocs; i++) { - ParsedDocument doc = testParsedDocument("append-only" + i, null, testDocumentWithTextField(), SOURCE, null); + ParsedDocument doc = + testParsedDocument("append-only" + i, null, testDocumentWithTextField(), SOURCE, null); if (randomBoolean()) { engine.index(appendOnlyReplica(doc, randomBoolean(), 1, generateNewSeqNo(engine))); } else { @@ -4898,7 +5040,8 @@ public void testTrackMaxSeqNoOfNonAppendOnlyOperations() throws Exception { long maxSeqNoOfNonAppendOnly = SequenceNumbers.NO_OPS_PERFORMED; final int numOps = scaledRandomIntBetween(100, 1000); for (int i = 0; i < numOps; i++) { - ParsedDocument parsedDocument = testParsedDocument(Integer.toString(i), null, testDocumentWithTextField(), SOURCE, null); + ParsedDocument parsedDocument = + testParsedDocument(Integer.toString(i), null, testDocumentWithTextField(), SOURCE, null); if (randomBoolean()) { // On replica - update max_seqno for non-append-only operations final long seqno = generateNewSeqNo(engine); final Engine.Index doc = replicaIndexForDoc(parsedDocument, 1, seqno, randomBoolean()); @@ -4913,7 +5056,8 @@ public void testTrackMaxSeqNoOfNonAppendOnlyOperations() throws Exception { if (randomBoolean()) { engine.index(indexForDoc(parsedDocument)); } else { - engine.delete(new Engine.Delete(parsedDocument.type(), parsedDocument.id(), newUid(parsedDocument.id()), primaryTerm.get())); + engine.delete(new Engine.Delete(parsedDocument.type(), parsedDocument.id(), + newUid(parsedDocument.id()), primaryTerm.get())); } } } @@ -4956,7 +5100,8 @@ public void testSkipOptimizeForExposedAppendOnlyOperations() throws Exception { assertThat(engine.getNumVersionLookups(), equalTo(lookupTimes)); // optimize for other append-only 2 (its seqno > max_seqno of non-append-only) - do not look up in version map. 
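The tombstone-pruning test above fills the sequence-number gap before expecting pruning because the local checkpoint only advances over a contiguous range of processed sequence numbers. A simplified, hypothetical sketch of that contiguity rule in plain Java; this is not the LocalCheckpointTracker implementation:

import java.util.BitSet;

// Simplified rule: the checkpoint is the highest sequence number up to which
// every operation has been processed, so it stalls at a gap.
final class CheckpointSketch {
    private final BitSet processed = new BitSet();
    private long checkpoint = -1;   // nothing processed yet

    void markSeqNoAsProcessed(long seqNo) {
        processed.set(Math.toIntExact(seqNo));
        while (processed.get(Math.toIntExact(checkpoint + 1))) {
            checkpoint++;
        }
    }

    long getCheckpoint() {
        return checkpoint;
    }
}

With this rule, processing sequence numbers 0 and 2 leaves the checkpoint at 0 until 1 is processed, which is why the gap has to be filled before deletes up to the checkpoint can be pruned.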
- engine.index(appendOnlyReplica(testParsedDocument("append-only-2", null, testDocumentWithTextField(), SOURCE, null), + engine.index(appendOnlyReplica(testParsedDocument("append-only-2", null, + testDocumentWithTextField(), SOURCE, null), false, randomNonNegativeLong(), generateNewSeqNo(engine))); assertThat(engine.getNumVersionLookups(), equalTo(lookupTimes)); } @@ -4967,12 +5112,14 @@ public void testTrimUnsafeCommits() throws Exception { final List seqNos = LongStream.rangeClosed(0, maxSeqNo).boxed().collect(Collectors.toList()); Collections.shuffle(seqNos, random()); try (Store store = createStore()) { - EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get); + EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), + null, null, globalCheckpoint::get); final List commitMaxSeqNo = new ArrayList<>(); final long minTranslogGen; try (InternalEngine engine = createEngine(config)) { for (int i = 0; i < seqNos.size(); i++) { - ParsedDocument doc = testParsedDocument(Long.toString(seqNos.get(i)), null, testDocument(), new BytesArray("{}"), null); + ParsedDocument doc = testParsedDocument(Long.toString(seqNos.get(i)), null, testDocument(), + new BytesArray("{}"), null); Engine.Index index = new Engine.Index(newUid(doc), doc, seqNos.get(i), 0, 1, VersionType.EXTERNAL, REPLICA, System.nanoTime(), -1, false); engine.index(index); @@ -5077,7 +5224,8 @@ public void testKeepMinRetainedSeqNoByMergePolicy() throws IOException { Randomness.shuffle(operations); Set existingSeqNos = new HashSet<>(); store = createStore(); - engine = createEngine(config(indexSettings, store, createTempDir(), newMergePolicy(), null, null, globalCheckpoint::get)); + engine = createEngine(config(indexSettings, store, createTempDir(), newMergePolicy(), null, null, + globalCheckpoint::get)); assertThat(engine.getMinRetainedSeqNo(), equalTo(0L)); long lastMinRetainedSeqNo = engine.getMinRetainedSeqNo(); for (Engine.Operation op : operations) { @@ -5274,7 +5422,8 @@ public void testRebuildLocalCheckpointTracker() throws Exception { List> commits = new ArrayList<>(); commits.add(new ArrayList<>()); try (Store store = createStore()) { - EngineConfig config = config(indexSettings, store, translogPath, newMergePolicy(), null, null, globalCheckpoint::get); + EngineConfig config = config(indexSettings, store, translogPath, + newMergePolicy(), null, null, globalCheckpoint::get); final List docs; try (InternalEngine engine = createEngine(config)) { List flushedOperations = new ArrayList<>(); @@ -5312,8 +5461,8 @@ public void testRebuildLocalCheckpointTracker() throws Exception { try (InternalEngine engine = new InternalEngine(config)) { // do not recover from translog final LocalCheckpointTracker tracker = engine.getLocalCheckpointTracker(); for (Engine.Operation op : operations) { - assertThat("seq_no=" + op.seqNo() + " max_seq_no=" + tracker.getMaxSeqNo() + " checkpoint=" + tracker.getCheckpoint(), - tracker.contains(op.seqNo()), equalTo(safeCommit.contains(op))); + assertThat("seq_no=" + op.seqNo() + " max_seq_no=" + tracker.getMaxSeqNo() + + " checkpoint=" + tracker.getCheckpoint(), tracker.contains(op.seqNo()), equalTo(safeCommit.contains(op))); } engine.initializeMaxSeqNoOfUpdatesOrDeletes(); engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE); diff --git a/server/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java index 
a417cba13b9a4..fae252728d46d 100644 --- a/server/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/BoolQueryBuilderTests.java @@ -113,7 +113,8 @@ protected void doAssertLuceneQuery(BoolQueryBuilder queryBuilder, Query query, S } } - private static List getBooleanClauses(List queryBuilders, BooleanClause.Occur occur, QueryShardContext context) throws IOException { + private static List getBooleanClauses(List queryBuilders, + BooleanClause.Occur occur, QueryShardContext context) throws IOException { List clauses = new ArrayList<>(); for (QueryBuilder query : queryBuilders) { Query innerQuery = query.toQuery(context); @@ -182,11 +183,13 @@ public void testDefaultMinShouldMatch() throws Exception { assertEquals(0, bq.getMinimumNumberShouldMatch()); // Filters have a minShouldMatch of 0/1 - ConstantScoreQuery csq = (ConstantScoreQuery) parseQuery(constantScoreQuery(boolQuery().must(termQuery("foo", "bar")))).toQuery(createShardContext()); + ConstantScoreQuery csq = (ConstantScoreQuery) parseQuery(constantScoreQuery(boolQuery() + .must(termQuery("foo", "bar")))).toQuery(createShardContext()); bq = (BooleanQuery) csq.getQuery(); assertEquals(0, bq.getMinimumNumberShouldMatch()); - csq = (ConstantScoreQuery) parseQuery(constantScoreQuery(boolQuery().should(termQuery("foo", "bar")))).toQuery(createShardContext()); + csq = (ConstantScoreQuery) parseQuery(constantScoreQuery(boolQuery().should(termQuery("foo", "bar")))) + .toQuery(createShardContext()); bq = (BooleanQuery) csq.getQuery(); assertEquals(1, bq.getMinimumNumberShouldMatch()); } @@ -433,7 +436,8 @@ public void testRewriteWithMatchNone() throws IOException { boolQueryBuilder = new BoolQueryBuilder(); boolQueryBuilder.must(new TermQueryBuilder("foo","bar")); - boolQueryBuilder.filter(new BoolQueryBuilder().should(new TermQueryBuilder("foo","bar")).filter(new MatchNoneQueryBuilder())); + boolQueryBuilder.filter(new BoolQueryBuilder().should(new TermQueryBuilder("foo","bar")) + .filter(new MatchNoneQueryBuilder())); rewritten = Rewriteable.rewrite(boolQueryBuilder, createShardContext()); assertEquals(new MatchNoneQueryBuilder(), rewritten); } diff --git a/server/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java index 49cb4442beb8c..d328c68362682 100644 --- a/server/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/BoostingQueryBuilderTests.java @@ -33,7 +33,8 @@ public class BoostingQueryBuilderTests extends AbstractQueryTestCase failingQueryBuilder.toQuery(shardContext)); assertThat(e.getMessage(), containsString("failed to find geo_point field [unmapped]")); diff --git a/server/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java index b54ce571453b3..18cb8106b9af9 100644 --- a/server/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilderTests.java @@ -238,7 +238,8 @@ protected MultiTermVectorsResponse executeMultiTermVectors(MultiTermVectorsReque if (request.doc() != null) { generatedFields = generateFields(randomFields, request.doc().utf8ToString()); } else { - generatedFields = generateFields(request.selectedFields().toArray(new 
String[request.selectedFields().size()]), request.id()); + generatedFields = + generateFields(request.selectedFields().toArray(new String[request.selectedFields().size()]), request.id()); } EnumSet flags = EnumSet.of(TermVectorsRequest.Flag.Positions, TermVectorsRequest.Flag.Offsets); response.setFields(generatedFields, request.selectedFields(), flags, generatedFields); @@ -289,21 +290,25 @@ public void testValidateEmptyFields() { public void testValidateEmptyLike() { String[] likeTexts = randomBoolean() ? null : new String[0]; Item[] likeItems = randomBoolean() ? null : new Item[0]; - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new MoreLikeThisQueryBuilder(likeTexts, likeItems)); + IllegalArgumentException e = + expectThrows(IllegalArgumentException.class, () -> new MoreLikeThisQueryBuilder(likeTexts, likeItems)); assertThat(e.getMessage(), containsString("requires either 'like' texts or items to be specified")); } public void testUnsupportedFields() throws IOException { assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); String unsupportedField = randomFrom(INT_FIELD_NAME, DOUBLE_FIELD_NAME, DATE_FIELD_NAME); - MoreLikeThisQueryBuilder queryBuilder = new MoreLikeThisQueryBuilder(new String[] {unsupportedField}, new String[]{"some text"}, null) + MoreLikeThisQueryBuilder queryBuilder = + new MoreLikeThisQueryBuilder(new String[] {unsupportedField}, new String[]{"some text"}, null) .failOnUnsupportedField(true); IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> queryBuilder.toQuery(createShardContext())); assertThat(e.getMessage(), containsString("more_like_this only supports text/keyword fields")); } public void testMoreLikeThisBuilder() throws Exception { - Query parsedQuery = parseQuery(moreLikeThisQuery(new String[]{"name.first", "name.last"}, new String[]{"something"}, null).minTermFreq(1).maxQueryTerms(12)).toQuery(createShardContext()); + Query parsedQuery = + parseQuery(moreLikeThisQuery(new String[]{"name.first", "name.last"}, new String[]{"something"}, null) + .minTermFreq(1).maxQueryTerms(12)).toQuery(createShardContext()); assertThat(parsedQuery, instanceOf(MoreLikeThisQuery.class)); MoreLikeThisQuery mltQuery = (MoreLikeThisQuery) parsedQuery; assertThat(mltQuery.getMoreLikeFields()[0], equalTo("name.first")); diff --git a/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java index ae313fef8b88f..0c27b7c39cf65 100644 --- a/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java @@ -108,7 +108,8 @@ protected MultiMatchQueryBuilder doCreateTestQueryBuilder() { if (randomBoolean()) { query.slop(randomIntBetween(0, 5)); } - if (fieldName.equals(STRING_FIELD_NAME) && randomBoolean() && (query.type() == Type.BEST_FIELDS || query.type() == Type.MOST_FIELDS)) { + if (fieldName.equals(STRING_FIELD_NAME) && randomBoolean() && + (query.type() == Type.BEST_FIELDS || query.type() == Type.MOST_FIELDS)) { query.fuzziness(randomFuzziness(fieldName)); } if (randomBoolean()) { @@ -207,7 +208,8 @@ public void testToQueryMultipleTermsBooleanQuery() throws Exception { public void testToQueryMultipleFieldsDisableDismax() throws Exception { assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); - Query query = 
multiMatchQuery("test").field(STRING_FIELD_NAME).field(STRING_FIELD_NAME_2).useDisMax(false).toQuery(createShardContext()); + Query query = multiMatchQuery("test").field(STRING_FIELD_NAME).field(STRING_FIELD_NAME_2) + .useDisMax(false).toQuery(createShardContext()); assertThat(query, instanceOf(DisjunctionMaxQuery.class)); DisjunctionMaxQuery dQuery = (DisjunctionMaxQuery) query; assertThat(dQuery.getTieBreakerMultiplier(), equalTo(1.0f)); @@ -218,7 +220,8 @@ public void testToQueryMultipleFieldsDisableDismax() throws Exception { public void testToQueryMultipleFieldsDisMaxQuery() throws Exception { assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); - Query query = multiMatchQuery("test").field(STRING_FIELD_NAME).field(STRING_FIELD_NAME_2).useDisMax(true).toQuery(createShardContext()); + Query query = multiMatchQuery("test").field(STRING_FIELD_NAME).field(STRING_FIELD_NAME_2) + .useDisMax(true).toQuery(createShardContext()); assertThat(query, instanceOf(DisjunctionMaxQuery.class)); DisjunctionMaxQuery disMaxQuery = (DisjunctionMaxQuery) query; assertThat(disMaxQuery.getTieBreakerMultiplier(), equalTo(0.0f)); @@ -246,8 +249,10 @@ public void testToQueryFieldsWildcard() throws Exception { public void testToQueryFieldMissing() throws Exception { assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0); - assertThat(multiMatchQuery("test").field(MISSING_WILDCARD_FIELD_NAME).toQuery(createShardContext()), instanceOf(MatchNoDocsQuery.class)); - assertThat(multiMatchQuery("test").field(MISSING_FIELD_NAME).toQuery(createShardContext()), instanceOf(MatchNoDocsQuery.class)); + assertThat(multiMatchQuery("test").field(MISSING_WILDCARD_FIELD_NAME).toQuery(createShardContext()), + instanceOf(MatchNoDocsQuery.class)); + assertThat(multiMatchQuery("test").field(MISSING_FIELD_NAME).toQuery(createShardContext()), + instanceOf(MatchNoDocsQuery.class)); } public void testFromJson() throws IOException { @@ -353,7 +358,8 @@ public void testToFuzzyQuery() throws Exception { qb.fuzzyTranspositions(false); Query query = qb.toQuery(createShardContext()); - FuzzyQuery expected = new FuzzyQuery(new Term(STRING_FIELD_NAME, "text"), 2, 2, 5, false); + FuzzyQuery expected = new FuzzyQuery(new Term(STRING_FIELD_NAME, "text"), 2, 2, + 5, false); assertEquals(expected, query); } @@ -367,8 +373,9 @@ public void testDefaultField() throws Exception { assertThat(query, anyOf(instanceOf(AllTermQuery.class), instanceOf(DisjunctionMaxQuery.class))); context.getIndexSettings().updateIndexMetaData( - newIndexMeta("index", context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field", - STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5").build()) + newIndexMeta("index", context.getIndexSettings().getSettings(), + Settings.builder().putList("index.query.default_field", STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5") + .build()) ); MultiMatchQueryBuilder qb = new MultiMatchQueryBuilder("hello"); @@ -382,7 +389,8 @@ public void testDefaultField() throws Exception { assertEquals(expected, query); context.getIndexSettings().updateIndexMetaData( - newIndexMeta("index", context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field", + newIndexMeta("index", context.getIndexSettings().getSettings(), + Settings.builder().putList("index.query.default_field", STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5", INT_FIELD_NAME).build()) ); // should fail because lenient defaults to false @@ -462,7 +470,8 @@ 
public void testWithStopWords() throws Exception { ) .toQuery(createShardContext()); expected = new BooleanQuery.Builder() - .add(new DisjunctionMaxQuery(Arrays.asList(new MatchNoDocsQuery(), new MatchNoDocsQuery()), 0f), BooleanClause.Occur.SHOULD) + .add(new DisjunctionMaxQuery(Arrays.asList(new MatchNoDocsQuery(), new MatchNoDocsQuery()), + 0f), BooleanClause.Occur.SHOULD) .build(); assertEquals(expected, query); } diff --git a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java index 5275853af3a42..ffa14fa1c77fe 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SimpleQueryStringBuilderTests.java @@ -707,6 +707,33 @@ public void testWithPrefixStopWords() throws Exception { assertEquals(expected, query); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/35773") + @Override + public void testToQuery() throws IOException { + super.testToQuery(); + } + + /** + * Test for behavior reported in https://github.com/elastic/elasticsearch/issues/34708 + * Unmapped field can lead to MatchNoDocsQuerys in disjunction queries. If tokens are eliminated (e.g. because + * the tokenizer removed them as punctuation) on regular fields, this can leave only MatchNoDocsQuerys in the + * disjunction clause. Instead those disjunctions should be eliminated completely. + */ + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/35773") + public void testUnmappedFieldNoTokenWithAndOperator() throws IOException { + Query query = new SimpleQueryStringBuilder("first & second") + .field(STRING_FIELD_NAME) + .field("unmapped") + .field("another_unmapped") + .defaultOperator(Operator.AND) + .toQuery(createShardContext()); + BooleanQuery expected = new BooleanQuery.Builder() + .add(new TermQuery(new Term(STRING_FIELD_NAME, "first")), BooleanClause.Occur.MUST) + .add(new TermQuery(new Term(STRING_FIELD_NAME, "second")), BooleanClause.Occur.MUST) + .build(); + assertEquals(expected, query); + } + private static IndexMetaData newIndexMeta(String name, Settings oldIndexSettings, Settings indexSettings) { Settings build = Settings.builder().put(oldIndexSettings) .put(indexSettings) diff --git a/server/src/test/java/org/elasticsearch/index/query/SpanNotQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SpanNotQueryBuilderTests.java index 0536dae6dfa39..1ffa85de0ecdf 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SpanNotQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SpanNotQueryBuilderTests.java @@ -70,7 +70,8 @@ public void testIllegalArgument() { } public void testDist() { - SpanNotQueryBuilder builder = new SpanNotQueryBuilder(new SpanTermQueryBuilder("name1", "value1"), new SpanTermQueryBuilder("name2", "value2")); + SpanNotQueryBuilder builder = new SpanNotQueryBuilder(new SpanTermQueryBuilder("name1", "value1"), + new SpanTermQueryBuilder("name2", "value2")); assertThat(builder.pre(), equalTo(0)); assertThat(builder.post(), equalTo(0)); builder.dist(-4); @@ -82,7 +83,8 @@ public void testDist() { } public void testPrePost() { - SpanNotQueryBuilder builder = new SpanNotQueryBuilder(new SpanTermQueryBuilder("name1", "value1"), new SpanTermQueryBuilder("name2", "value2")); + SpanNotQueryBuilder builder = new SpanNotQueryBuilder(new SpanTermQueryBuilder("name1", "value1"), + new 
SpanTermQueryBuilder("name2", "value2")); assertThat(builder.pre(), equalTo(0)); assertThat(builder.post(), equalTo(0)); builder.pre(-4).post(-4); diff --git a/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java b/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java index 112de76b43e21..582ded0cb2a3f 100644 --- a/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java @@ -138,7 +138,8 @@ public AtomicFieldData loadDirect(LeafReaderContext context) throws Exception { } @Override - public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, XFieldComparatorSource.Nested nested, boolean reverse) { + public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, + XFieldComparatorSource.Nested nested, boolean reverse) { throw new UnsupportedOperationException(UNSUPPORTED); } @@ -228,7 +229,8 @@ public AtomicNumericFieldData loadDirect(LeafReaderContext context) throws Excep } @Override - public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, XFieldComparatorSource.Nested nested, boolean reverse) { + public SortField sortField(@Nullable Object missingValue, MultiValueMode sortMode, + XFieldComparatorSource.Nested nested, boolean reverse) { throw new UnsupportedOperationException(UNSUPPORTED); } @@ -246,11 +248,14 @@ public Index index() { private static final ScoreFunction RANDOM_SCORE_FUNCTION = new RandomScoreFunction(0, 0, new IndexFieldDataStub()); private static final ScoreFunction FIELD_VALUE_FACTOR_FUNCTION = new FieldValueFactorFunction("test", 1, FieldValueFactorFunction.Modifier.LN, 1.0, null); - private static final ScoreFunction GAUSS_DECAY_FUNCTION = new DecayFunctionBuilder.NumericFieldDataScoreFunction(0, 1, 0.1, 0, + private static final ScoreFunction GAUSS_DECAY_FUNCTION = + new DecayFunctionBuilder.NumericFieldDataScoreFunction(0, 1, 0.1, 0, GaussDecayFunctionBuilder.GAUSS_DECAY_FUNCTION, new IndexNumericFieldDataStub(), MultiValueMode.MAX); - private static final ScoreFunction EXP_DECAY_FUNCTION = new DecayFunctionBuilder.NumericFieldDataScoreFunction(0, 1, 0.1, 0, + private static final ScoreFunction EXP_DECAY_FUNCTION = + new DecayFunctionBuilder.NumericFieldDataScoreFunction(0, 1, 0.1, 0, ExponentialDecayFunctionBuilder.EXP_DECAY_FUNCTION, new IndexNumericFieldDataStub(), MultiValueMode.MAX); - private static final ScoreFunction LIN_DECAY_FUNCTION = new DecayFunctionBuilder.NumericFieldDataScoreFunction(0, 1, 0.1, 0, + private static final ScoreFunction LIN_DECAY_FUNCTION = + new DecayFunctionBuilder.NumericFieldDataScoreFunction(0, 1, 0.1, 0, LinearDecayFunctionBuilder.LINEAR_DECAY_FUNCTION, new IndexNumericFieldDataStub(), MultiValueMode.MAX); private static final ScoreFunction WEIGHT_FACTOR_FUNCTION = new WeightFactorFunction(4); private static final String TEXT = "The way out is through."; @@ -319,7 +324,8 @@ public void testExplainFunctionScoreQuery() throws IOException { } public Explanation getFunctionScoreExplanation(IndexSearcher searcher, ScoreFunction scoreFunction) throws IOException { - FunctionScoreQuery functionScoreQuery = new FunctionScoreQuery(new TermQuery(TERM), scoreFunction, CombineFunction.AVG,0.0f, 100); + FunctionScoreQuery functionScoreQuery = new FunctionScoreQuery(new TermQuery(TERM), scoreFunction, + CombineFunction.AVG,0.0f, 100); Weight weight = 
searcher.createNormalizedWeight(functionScoreQuery, true); Explanation explanation = weight.explain(searcher.getIndexReader().leaves().get(0), 0); return explanation.getDetails()[1]; @@ -370,7 +376,8 @@ public void testExplainFiltersFunctionScoreQuery() throws IOException { checkFiltersFunctionScoreExplanation(functionExplanation, "random score function (seed: 0, field: test)", 0); assertThat(functionExplanation.getDetails()[0].getDetails()[0].getDetails()[1].getDetails().length, equalTo(0)); - checkFiltersFunctionScoreExplanation(functionExplanation, "field value function: ln(doc['test'].value?:1.0 * factor=1.0)", 1); + checkFiltersFunctionScoreExplanation(functionExplanation, + "field value function: ln(doc['test'].value?:1.0 * factor=1.0)", 1); assertThat(functionExplanation.getDetails()[0].getDetails()[1].getDetails()[1].getDetails().length, equalTo(0)); checkFiltersFunctionScoreExplanation(functionExplanation, "Function for field test:", 2); @@ -408,7 +415,8 @@ public FunctionScoreQuery getFiltersFunctionScoreQuery(FunctionScoreQuery.ScoreM filterFunctions[i] = new FunctionScoreQuery.FilterScoreFunction( new TermQuery(TERM), scoreFunctions[i]); } - return new FunctionScoreQuery(new TermQuery(TERM), scoreMode, filterFunctions, combineFunction,Float.MAX_VALUE * -1, Float.MAX_VALUE); + return new FunctionScoreQuery(new TermQuery(TERM), + scoreMode, filterFunctions, combineFunction,Float.MAX_VALUE * -1, Float.MAX_VALUE); } public void checkFiltersFunctionScoreExplanation(Explanation randomExplanation, String functionExpl, int whichFunction) { @@ -626,15 +634,19 @@ public void testFunctionScoreHashCodeAndEquals() { float maxBoost = randomBoolean() ? Float.POSITIVE_INFINITY : randomFloat(); ScoreFunction function = new DummyScoreFunction(combineFunction); - FunctionScoreQuery q = new FunctionScoreQuery(new TermQuery(new Term("foo", "bar")), function, combineFunction, minScore, maxBoost); - FunctionScoreQuery q1 = new FunctionScoreQuery(new TermQuery(new Term("foo", "bar")), function, combineFunction, minScore, maxBoost); + FunctionScoreQuery q = + new FunctionScoreQuery(new TermQuery(new Term("foo", "bar")), function, combineFunction, minScore, maxBoost); + FunctionScoreQuery q1 = + new FunctionScoreQuery(new TermQuery(new Term("foo", "bar")), function, combineFunction, minScore, maxBoost); assertEquals(q, q); assertEquals(q.hashCode(), q.hashCode()); assertEquals(q, q1); assertEquals(q.hashCode(), q1.hashCode()); - FunctionScoreQuery diffQuery = new FunctionScoreQuery(new TermQuery(new Term("foo", "baz")), function, combineFunction, minScore, maxBoost); - FunctionScoreQuery diffMinScore = new FunctionScoreQuery(q.getSubQuery(), function, combineFunction, minScore == null ? 1.0f : null, maxBoost); + FunctionScoreQuery diffQuery = + new FunctionScoreQuery(new TermQuery(new Term("foo", "baz")), function, combineFunction, minScore, maxBoost); + FunctionScoreQuery diffMinScore = + new FunctionScoreQuery(q.getSubQuery(), function, combineFunction, minScore == null ? 1.0f : null, maxBoost); ScoreFunction otherFunction = new DummyScoreFunction(combineFunction); FunctionScoreQuery diffFunction = new FunctionScoreQuery(q.getSubQuery(), otherFunction, combineFunction, minScore, maxBoost); FunctionScoreQuery diffMaxBoost = new FunctionScoreQuery(new TermQuery(new Term("foo", "bar")), @@ -665,10 +677,12 @@ public void testFilterFunctionScoreHashCodeAndEquals() { Float minScore = randomBoolean() ? null : 1.0f; Float maxBoost = randomBoolean() ? 
Float.POSITIVE_INFINITY : randomFloat(); - FilterScoreFunction function = new FilterScoreFunction(new TermQuery(new Term("filter", "query")), scoreFunction); + FilterScoreFunction function = + new FilterScoreFunction(new TermQuery(new Term("filter", "query")), scoreFunction); FunctionScoreQuery q = new FunctionScoreQuery(new TermQuery(new Term("foo", "bar")), function, combineFunction, minScore, maxBoost); - FunctionScoreQuery q1 = new FunctionScoreQuery(new TermQuery(new Term("foo", "bar")), function, combineFunction, minScore, maxBoost); + FunctionScoreQuery q1 = + new FunctionScoreQuery(new TermQuery(new Term("foo", "bar")), function, combineFunction, minScore, maxBoost); assertEquals(q, q); assertEquals(q.hashCode(), q.hashCode()); assertEquals(q, q1); @@ -683,7 +697,8 @@ public void testFilterFunctionScoreHashCodeAndEquals() { function, combineFunction, minScore == null ? 0.9f : null, maxBoost); FilterScoreFunction otherFunc = new FilterScoreFunction(new TermQuery(new Term("filter", "other_query")), scoreFunction); FunctionScoreQuery diffFunc = new FunctionScoreQuery(new TermQuery(new Term("foo", "bar")), randomFrom(ScoreMode.values()), - randomBoolean() ? new ScoreFunction[] { function, otherFunc } : new ScoreFunction[] { otherFunc }, combineFunction, minScore, maxBoost); + randomBoolean() ? new ScoreFunction[] { function, otherFunc } : + new ScoreFunction[] { otherFunc }, combineFunction, minScore, maxBoost); FunctionScoreQuery[] queries = new FunctionScoreQuery[] { diffQuery, diff --git a/server/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java b/server/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java index 4ddb80c4b0633..549397db752ae 100644 --- a/server/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java @@ -89,8 +89,10 @@ public void testGeohashCellSize() { double equatorialDistance = 2 * Math.PI * 6378137.0; double polarDistance = Math.PI * 6356752.314245; assertThat(GeoUtils.geoHashCellSize(0), equalTo(Math.sqrt(Math.pow(polarDistance, 2) + Math.pow(equatorialDistance, 2)))); - assertThat(GeoUtils.geoHashCellSize(1), equalTo(Math.sqrt(Math.pow(polarDistance / 4, 2) + Math.pow(equatorialDistance / 8, 2)))); - assertThat(GeoUtils.geoHashCellSize(2), equalTo(Math.sqrt(Math.pow(polarDistance / 32, 2) + Math.pow(equatorialDistance / 32, 2)))); + assertThat(GeoUtils.geoHashCellSize(1), + equalTo(Math.sqrt(Math.pow(polarDistance / 4, 2) + Math.pow(equatorialDistance / 8, 2)))); + assertThat(GeoUtils.geoHashCellSize(2), + equalTo(Math.sqrt(Math.pow(polarDistance / 32, 2) + Math.pow(equatorialDistance / 32, 2)))); assertThat(GeoUtils.geoHashCellSize(3), equalTo(Math.sqrt(Math.pow(polarDistance / 128, 2) + Math.pow(equatorialDistance / 256, 2)))); assertThat(GeoUtils.geoHashCellSize(4), @@ -167,13 +169,20 @@ public void testQuadTreeCellHeight() { public void testQuadTreeCellSize() { double equatorialDistance = 2 * Math.PI * 6378137.0; double polarDistance = Math.PI * 6356752.314245; - assertThat(GeoUtils.quadTreeCellSize(0), equalTo(Math.sqrt(Math.pow(polarDistance, 2) + Math.pow(equatorialDistance, 2)))); - assertThat(GeoUtils.quadTreeCellSize(1), equalTo(Math.sqrt(Math.pow(polarDistance / 2, 2) + Math.pow(equatorialDistance / 2, 2)))); - assertThat(GeoUtils.quadTreeCellSize(2), equalTo(Math.sqrt(Math.pow(polarDistance / 4, 2) + Math.pow(equatorialDistance / 4, 2)))); - assertThat(GeoUtils.quadTreeCellSize(3), equalTo(Math.sqrt(Math.pow(polarDistance / 
8, 2) + Math.pow(equatorialDistance / 8, 2)))); - assertThat(GeoUtils.quadTreeCellSize(4), equalTo(Math.sqrt(Math.pow(polarDistance / 16, 2) + Math.pow(equatorialDistance / 16, 2)))); - assertThat(GeoUtils.quadTreeCellSize(5), equalTo(Math.sqrt(Math.pow(polarDistance / 32, 2) + Math.pow(equatorialDistance / 32, 2)))); - assertThat(GeoUtils.quadTreeCellSize(6), equalTo(Math.sqrt(Math.pow(polarDistance / 64, 2) + Math.pow(equatorialDistance / 64, 2)))); + assertThat(GeoUtils.quadTreeCellSize(0), + equalTo(Math.sqrt(Math.pow(polarDistance, 2) + Math.pow(equatorialDistance, 2)))); + assertThat(GeoUtils.quadTreeCellSize(1), + equalTo(Math.sqrt(Math.pow(polarDistance / 2, 2) + Math.pow(equatorialDistance / 2, 2)))); + assertThat(GeoUtils.quadTreeCellSize(2), + equalTo(Math.sqrt(Math.pow(polarDistance / 4, 2) + Math.pow(equatorialDistance / 4, 2)))); + assertThat(GeoUtils.quadTreeCellSize(3), + equalTo(Math.sqrt(Math.pow(polarDistance / 8, 2) + Math.pow(equatorialDistance / 8, 2)))); + assertThat(GeoUtils.quadTreeCellSize(4), + equalTo(Math.sqrt(Math.pow(polarDistance / 16, 2) + Math.pow(equatorialDistance / 16, 2)))); + assertThat(GeoUtils.quadTreeCellSize(5), + equalTo(Math.sqrt(Math.pow(polarDistance / 32, 2) + Math.pow(equatorialDistance / 32, 2)))); + assertThat(GeoUtils.quadTreeCellSize(6), + equalTo(Math.sqrt(Math.pow(polarDistance / 64, 2) + Math.pow(equatorialDistance / 64, 2)))); assertThat(GeoUtils.quadTreeCellSize(7), equalTo(Math.sqrt(Math.pow(polarDistance / 128, 2) + Math.pow(equatorialDistance / 128, 2)))); assertThat(GeoUtils.quadTreeCellSize(8), @@ -581,7 +590,8 @@ public void testPrefixTreeCellSizes() { assertThat(GeoUtils.quadTreeCellHeight(level), lessThanOrEqualTo(height)); assertThat(GeoUtils.geoHashLevelsForPrecision(size), equalTo(geohashPrefixTree.getLevelForDistance(degrees))); - assertThat("width at level " + i, gNode.getShape().getBoundingBox().getWidth(), equalTo(360.d * width / GeoUtils.EARTH_EQUATOR)); + assertThat("width at level " + i, + gNode.getShape().getBoundingBox().getWidth(), equalTo(360.d * width / GeoUtils.EARTH_EQUATOR)); assertThat("height at level " + i, gNode.getShape().getBoundingBox().getHeight(), equalTo(180.d * height / GeoUtils.EARTH_POLAR_DISTANCE)); @@ -602,7 +612,8 @@ public void testPrefixTreeCellSizes() { assertThat(GeoUtils.quadTreeCellHeight(level), lessThanOrEqualTo(height)); assertThat(GeoUtils.quadTreeLevelsForPrecision(size), equalTo(quadPrefixTree.getLevelForDistance(degrees))); - assertThat("width at level " + i, qNode.getShape().getBoundingBox().getWidth(), equalTo(360.d * width / GeoUtils.EARTH_EQUATOR)); + assertThat("width at level " + i, + qNode.getShape().getBoundingBox().getWidth(), equalTo(360.d * width / GeoUtils.EARTH_EQUATOR)); assertThat("height at level " + i, qNode.getShape().getBoundingBox().getHeight(), equalTo(180.d * height / GeoUtils.EARTH_POLAR_DISTANCE)); diff --git a/server/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTestCase.java b/server/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTestCase.java index d4dc71388ac7d..da4b55f1a633b 100644 --- a/server/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTestCase.java +++ b/server/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTestCase.java @@ -221,8 +221,10 @@ public void testNestedSorting() throws Exception { IndexSearcher searcher = new IndexSearcher(directoryReader); Query parentFilter = new TermQuery(new Term("__type", "parent")); 
Query childFilter = Queries.not(parentFilter); - XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, null, createNested(searcher, parentFilter, childFilter)); - ToParentBlockJoinQuery query = new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None); + XFieldComparatorSource nestedComparatorSource = + createFieldComparator("field2", sortMode, null, createNested(searcher, parentFilter, childFilter)); + ToParentBlockJoinQuery query = + new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None); Sort sort = new Sort(new SortField("field2", nestedComparatorSource)); TopFieldDocs topDocs = searcher.search(query, 5, sort); @@ -255,7 +257,8 @@ public void testNestedSorting() throws Exception { assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(9)); childFilter = new TermQuery(new Term("filter_1", "T")); - nestedComparatorSource = createFieldComparator("field2", sortMode, null, createNested(searcher, parentFilter, childFilter)); + nestedComparatorSource = + createFieldComparator("field2", sortMode, null, createNested(searcher, parentFilter, childFilter)); query = new ToParentBlockJoinQuery( new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), @@ -291,7 +294,8 @@ public void testNestedSorting() throws Exception { assertThat(topDocs.scoreDocs[4].doc, equalTo(3)); assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(9)); - nestedComparatorSource = createFieldComparator("field2", sortMode, 127, createNested(searcher, parentFilter, childFilter)); + nestedComparatorSource = + createFieldComparator("field2", sortMode, 127, createNested(searcher, parentFilter, childFilter)); sort = new Sort(new SortField("field2", nestedComparatorSource, true)); topDocs = searcher.search(new TermQuery(new Term("__type", "parent")), 5, sort); assertThat(topDocs.totalHits, equalTo(8L)); @@ -307,7 +311,8 @@ public void testNestedSorting() throws Exception { assertThat(topDocs.scoreDocs[4].doc, equalTo(7)); assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(8)); - nestedComparatorSource = createFieldComparator("field2", sortMode, -127, createNested(searcher, parentFilter, childFilter)); + nestedComparatorSource = + createFieldComparator("field2", sortMode, -127, createNested(searcher, parentFilter, childFilter)); sort = new Sort(new SortField("field2", nestedComparatorSource)); topDocs = searcher.search(new TermQuery(new Term("__type", "parent")), 5, sort); assertThat(topDocs.totalHits, equalTo(8L)); @@ -332,8 +337,10 @@ public void testNestedSorting() throws Exception { protected void assertAvgScoreMode(Query parentFilter, IndexSearcher searcher) throws IOException { MultiValueMode sortMode = MultiValueMode.AVG; Query childFilter = Queries.not(parentFilter); - XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, -127, createNested(searcher, parentFilter, childFilter)); - Query query = new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None); + XFieldComparatorSource nestedComparatorSource = + createFieldComparator("field2", sortMode, -127, createNested(searcher, parentFilter, childFilter)); + Query query = + new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None); Sort sort = new 
Sort(new SortField("field2", nestedComparatorSource)); TopDocs topDocs = searcher.search(query, 5, sort); assertThat(topDocs.totalHits, equalTo(7L)); @@ -352,6 +359,7 @@ protected void assertAvgScoreMode(Query parentFilter, IndexSearcher searcher) th protected abstract IndexableField createField(String name, int value); - protected abstract IndexFieldData.XFieldComparatorSource createFieldComparator(String fieldName, MultiValueMode sortMode, Object missingValue, Nested nested); + protected abstract IndexFieldData.XFieldComparatorSource createFieldComparator(String fieldName, MultiValueMode sortMode, + Object missingValue, Nested nested); } diff --git a/server/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java b/server/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java index c643ea6cee045..3e3dadcde5cfc 100644 --- a/server/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java @@ -51,7 +51,8 @@ protected String getFieldDataType() { } @Override - protected IndexFieldData.XFieldComparatorSource createFieldComparator(String fieldName, MultiValueMode sortMode, Object missingValue, Nested nested) { + protected IndexFieldData.XFieldComparatorSource createFieldComparator(String fieldName, MultiValueMode sortMode, + Object missingValue, Nested nested) { IndexNumericFieldData fieldData = getForField(fieldName); return new DoubleValuesComparatorSource(fieldData, missingValue, sortMode, nested); } @@ -65,8 +66,10 @@ protected IndexableField createField(String name, int value) { protected void assertAvgScoreMode(Query parentFilter, IndexSearcher searcher) throws IOException { MultiValueMode sortMode = MultiValueMode.AVG; Query childFilter = Queries.not(parentFilter); - XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, -127, createNested(searcher, parentFilter, childFilter)); - Query query = new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None); + XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, -127, + createNested(searcher, parentFilter, childFilter)); + Query query = new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), + new QueryBitSetProducer(parentFilter), ScoreMode.None); Sort sort = new Sort(new SortField("field2", nestedComparatorSource)); TopDocs topDocs = searcher.search(query, 5, sort); assertThat(topDocs.totalHits, equalTo(7L)); diff --git a/server/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java b/server/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java index 13d0e83e37e01..1bf7252fc8d99 100644 --- a/server/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java @@ -51,7 +51,8 @@ protected String getFieldDataType() { } @Override - protected IndexFieldData.XFieldComparatorSource createFieldComparator(String fieldName, MultiValueMode sortMode, Object missingValue, Nested nested) { + protected IndexFieldData.XFieldComparatorSource createFieldComparator(String fieldName, MultiValueMode sortMode, + Object missingValue, Nested nested) { IndexNumericFieldData fieldData = getForField(fieldName); return new FloatValuesComparatorSource(fieldData, missingValue, sortMode, nested); } @@ 
-61,11 +62,14 @@ protected IndexableField createField(String name, int value) { return new SortedNumericDocValuesField(name, NumericUtils.floatToSortableInt(value)); } - protected void assertAvgScoreMode(Query parentFilter, IndexSearcher searcher, IndexFieldData.XFieldComparatorSource innerFieldComparator) throws IOException { + protected void assertAvgScoreMode(Query parentFilter, IndexSearcher searcher, + IndexFieldData.XFieldComparatorSource innerFieldComparator) throws IOException { MultiValueMode sortMode = MultiValueMode.AVG; Query childFilter = Queries.not(parentFilter); - XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, -127, createNested(searcher, parentFilter, childFilter)); - Query query = new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None); + XFieldComparatorSource nestedComparatorSource = + createFieldComparator("field2", sortMode, -127, createNested(searcher, parentFilter, childFilter)); + Query query = + new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None); Sort sort = new Sort(new SortField("field2", nestedComparatorSource)); TopDocs topDocs = searcher.search(query, 5, sort); assertThat(topDocs.totalHits, equalTo(7L)); diff --git a/server/src/test/java/org/elasticsearch/index/search/nested/LongNestedSortingTests.java b/server/src/test/java/org/elasticsearch/index/search/nested/LongNestedSortingTests.java index c8f2a5f9dfac3..1e271c47d6f07 100644 --- a/server/src/test/java/org/elasticsearch/index/search/nested/LongNestedSortingTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/nested/LongNestedSortingTests.java @@ -34,7 +34,8 @@ protected String getFieldDataType() { } @Override - protected IndexFieldData.XFieldComparatorSource createFieldComparator(String fieldName, MultiValueMode sortMode, Object missingValue, Nested nested) { + protected IndexFieldData.XFieldComparatorSource createFieldComparator(String fieldName, MultiValueMode sortMode, + Object missingValue, Nested nested) { IndexNumericFieldData fieldData = getForField(fieldName); return new LongValuesComparatorSource(fieldData, missingValue, sortMode, nested); } diff --git a/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java b/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java index 1300debd5ebda..1aa2fbc92641c 100644 --- a/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java @@ -129,7 +129,8 @@ public void testDuel() throws Exception { searcher.getIndexReader().close(); } - private TopDocs getTopDocs(IndexSearcher searcher, IndexFieldData indexFieldData, String missingValue, MultiValueMode sortMode, int n, boolean reverse) throws IOException { + private TopDocs getTopDocs(IndexSearcher searcher, IndexFieldData indexFieldData, String missingValue, + MultiValueMode sortMode, int n, boolean reverse) throws IOException { Query parentFilter = new TermQuery(new Term("__type", "parent")); Query childFilter = new TermQuery(new Term("__type", "child")); SortField sortField = indexFieldData.sortField(missingValue, sortMode, createNested(searcher, parentFilter, childFilter), reverse); @@ -299,8 +300,10 @@ public void testNestedSorting() throws Exception { PagedBytesIndexFieldData indexFieldData = getForField("field2"); Query parentFilter = new TermQuery(new 
Term("__type", "parent")); Query childFilter = Queries.not(parentFilter); - BytesRefFieldComparatorSource nestedComparatorSource = new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(searcher, parentFilter, childFilter)); - ToParentBlockJoinQuery query = new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None); + BytesRefFieldComparatorSource nestedComparatorSource = + new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(searcher, parentFilter, childFilter)); + ToParentBlockJoinQuery query = + new ToParentBlockJoinQuery(new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), ScoreMode.None); Sort sort = new Sort(new SortField("field2", nestedComparatorSource)); TopFieldDocs topDocs = searcher.search(query, 5, sort); @@ -318,7 +321,8 @@ public void testNestedSorting() throws Exception { assertThat(((BytesRef) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).utf8ToString(), equalTo("i")); sortMode = MultiValueMode.MAX; - nestedComparatorSource = new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(searcher, parentFilter, childFilter)); + nestedComparatorSource = + new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(searcher, parentFilter, childFilter)); sort = new Sort(new SortField("field2", nestedComparatorSource, true)); topDocs = searcher.search(query, 5, sort); assertThat(topDocs.totalHits, equalTo(7L)); @@ -339,7 +343,8 @@ public void testNestedSorting() throws Exception { bq.add(parentFilter, Occur.MUST_NOT); bq.add(new TermQuery(new Term("filter_1", "T")), Occur.MUST); childFilter = bq.build(); - nestedComparatorSource = new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(searcher, parentFilter, childFilter)); + nestedComparatorSource = + new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(searcher, parentFilter, childFilter)); query = new ToParentBlockJoinQuery( new ConstantScoreQuery(childFilter), new QueryBitSetProducer(parentFilter), diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java index 0487d727af2a5..3c05550ac04a2 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -223,10 +223,13 @@ public void testUpdatePriority() { assertAcked(client().admin().indices().prepareCreate("test") .setSettings(Settings.builder().put(IndexMetaData.SETTING_PRIORITY, 200))); IndexService indexService = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); - assertEquals(200, indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue()); - client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_PRIORITY, 400) + assertEquals(200, + indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue()); + client().admin().indices().prepareUpdateSettings("test") + .setSettings(Settings.builder().put(IndexMetaData.SETTING_PRIORITY, 400) .build()).get(); - assertEquals(400, indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue()); + assertEquals(400, + indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue()); } public void 
testIndexDirIsDeletedWhenShardRemoved() throws Exception { @@ -238,7 +241,8 @@ public void testIndexDirIsDeletedWhenShardRemoved() throws Exception { .build(); createIndex("test", idxSettings); ensureGreen("test"); - client().prepareIndex("test", "bar", "1").setSource("{}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get(); + client().prepareIndex("test", "bar", "1") + .setSource("{}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get(); SearchResponse response = client().prepareSearch("test").get(); assertHitCount(response, 1L); client().admin().indices().prepareDelete("test").get(); @@ -509,7 +513,8 @@ public void testShardHasMemoryBufferOnTranslogRecover() throws Throwable { IndexShard shard = indexService.getShardOrNull(0); client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); client().prepareDelete("test", "test", "0").get(); - client().prepareIndex("test", "test", "1").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get(); + client().prepareIndex("test", "test", "1").setSource("{\"foo\" : \"bar\"}", XContentType.JSON) + .setRefreshPolicy(IMMEDIATE).get(); IndexSearcherWrapper wrapper = new IndexSearcherWrapper() {}; shard.close("simon says", false); @@ -575,14 +580,17 @@ public void testCircuitBreakerIncrementedByIndexShard() throws Exception { .setTransientSettings(Settings.builder().put("network.breaker.inflight_requests.overhead", 0.0)).get(); // Generate a couple of segments - client().prepareIndex("test", "_doc", "1").setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON) - .setRefreshPolicy(IMMEDIATE).get(); + client().prepareIndex("test", "_doc", "1") + .setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON) + .setRefreshPolicy(IMMEDIATE).get(); // Use routing so 2 documents are guaranteed to be on the same shard String routing = randomAlphaOfLength(5); - client().prepareIndex("test", "_doc", "2").setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON) - .setRefreshPolicy(IMMEDIATE).setRouting(routing).get(); - client().prepareIndex("test", "_doc", "3").setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON) - .setRefreshPolicy(IMMEDIATE).setRouting(routing).get(); + client().prepareIndex("test", "_doc", "2") + .setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON) + .setRefreshPolicy(IMMEDIATE).setRouting(routing).get(); + client().prepareIndex("test", "_doc", "3") + .setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON) + .setRefreshPolicy(IMMEDIATE).setRouting(routing).get(); checkAccountingBreaker(); // Test that force merging causes the breaker to be correctly adjusted @@ -596,7 +604,8 @@ public void testCircuitBreakerIncrementedByIndexShard() throws Exception { // Test that we're now above the parent limit due to the segments Exception e = expectThrows(Exception.class, - () -> client().prepareSearch("test").addAggregation(AggregationBuilders.terms("foo_terms").field("foo.keyword")).get()); + () -> client().prepareSearch("test") + .addAggregation(AggregationBuilders.terms("foo_terms").field("foo.keyword")).get()); logger.info("--> got: {}", ExceptionsHelper.detailedMessage(e)); assertThat(ExceptionsHelper.detailedMessage(e), containsString("[parent] Data too large, data for []")); @@ -627,9 +636,10 @@ public static final IndexShard newIndexShard(IndexService indexService, IndexSha CircuitBreakerService cbs, IndexingOperationListener... 
listeners) throws IOException { ShardRouting initializingShardRouting = getInitializingShardRouting(shard.routingEntry()); IndexShard newShard = new IndexShard(initializingShardRouting, indexService.getIndexSettings(), shard.shardPath(), - shard.store(), indexService.getIndexSortSupplier(), indexService.cache(), indexService.mapperService(), indexService.similarityService(), - shard.getEngineFactory(), indexService.getIndexEventListener(), wrapper, - indexService.getThreadPool(), indexService.getBigArrays(), null, Collections.emptyList(), Arrays.asList(listeners), () -> {}, cbs); + shard.store(), indexService.getIndexSortSupplier(), indexService.cache(), indexService.mapperService(), + indexService.similarityService(), shard.getEngineFactory(), indexService.getIndexEventListener(), wrapper, + indexService.getThreadPool(), indexService.getBigArrays(), null, Collections.emptyList(), Arrays.asList(listeners), + () -> {}, cbs); return newShard; } @@ -643,7 +653,9 @@ private static ShardRouting getInitializingShardRouting(ShardRouting existingSha } public void testGlobalCheckpointListeners() throws Exception { - createIndex("test", Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0).build()); + createIndex("test", Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0).build()); ensureGreen(); final IndicesService indicesService = getInstanceFromNode(IndicesService.class); final IndexService test = indicesService.indexService(resolveIndex("test")); @@ -689,7 +701,9 @@ public void testGlobalCheckpointListeners() throws Exception { } public void testGlobalCheckpointListenerTimeout() throws InterruptedException { - createIndex("test", Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0).build()); + createIndex("test", Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0).build()); ensureGreen(); final IndicesService indicesService = getInstanceFromNode(IndicesService.class); final IndexService test = indicesService.indexService(resolveIndex("test")); @@ -716,7 +730,9 @@ public void testGlobalCheckpointListenerTimeout() throws InterruptedException { } public void testInvalidateIndicesRequestCacheWhenRollbackEngine() throws Exception { - createIndex("test", Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0) + createIndex("test", Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) .put("index.refresh_interval", -1).build()); ensureGreen(); final IndicesService indicesService = getInstanceFromNode(IndicesService.class); @@ -742,7 +758,8 @@ public void testInvalidateIndicesRequestCacheWhenRollbackEngine() throws Excepti } shard.refresh("test"); try (Engine.Searcher searcher = shard.acquireSearcher("test")) { - assertThat("numDocs=" + numDocs + " moreDocs=" + moreDocs, (long) searcher.reader().numDocs(), equalTo(numDocs + moreDocs)); + assertThat("numDocs=" + numDocs + " moreDocs=" + moreDocs, + (long) searcher.reader().numDocs(), equalTo(numDocs + moreDocs)); } assertThat("numDocs=" + numDocs + " moreDocs=" + moreDocs, client().search(countRequest).actionGet().getHits().totalHits, equalTo(numDocs + moreDocs)); diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 3d63efcb05e6c..2248aa0469fe4 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java 
+++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -613,7 +613,8 @@ public void onFailure(Exception e) { fail(); } }, - ThreadPool.Names.WRITE, "")).getMessage(), containsString("in primary mode cannot be a replication target")); + ThreadPool.Names.WRITE, "")).getMessage(), + containsString("in primary mode cannot be a replication target")); } closeShards(indexShard); @@ -720,7 +721,8 @@ public void onFailure(Exception e) { assertTrue(onFailure.get()); assertThat(onFailureException.get(), instanceOf(IllegalStateException.class)); assertThat( - onFailureException.get(), hasToString(containsString("operation primary term [" + (primaryTerm - 1) + "] is too old"))); + onFailureException.get(), + hasToString(containsString("operation primary term [" + (primaryTerm - 1) + "] is too old"))); } { @@ -869,7 +871,8 @@ public void testGlobalCheckpointSync() throws IOException { .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 2) .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) .build(); - final IndexMetaData.Builder indexMetadata = IndexMetaData.builder(shardRouting.getIndexName()).settings(settings).primaryTerm(0, 1); + final IndexMetaData.Builder indexMetadata = + IndexMetaData.builder(shardRouting.getIndexName()).settings(settings).primaryTerm(0, 1); final AtomicBoolean synced = new AtomicBoolean(); final IndexShard primaryShard = newShard(shardRouting, indexMetadata.build(), null, new InternalEngineFactory(), () -> synced.set(true)); @@ -1237,7 +1240,8 @@ public void testShardStatsWithFailures() throws IOException { allowShardFailures(); final ShardId shardId = new ShardId("index", "_na_", 0); final ShardRouting shardRouting = - newShardRouting(shardId, "node", true, ShardRoutingState.INITIALIZING, RecoverySource.EmptyStoreRecoverySource.INSTANCE); + newShardRouting(shardId, "node", true, + ShardRoutingState.INITIALIZING, RecoverySource.EmptyStoreRecoverySource.INSTANCE); final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir()); @@ -1771,7 +1775,8 @@ public void testPrimaryHandOffUpdatesLocalCheckpoint() throws IOException { for (int i = 0; i < totalOps; i++) { indexDoc(primarySource, "_doc", Integer.toString(i)); } - IndexShardTestCase.updateRoutingEntry(primarySource, primarySource.routingEntry().relocate(randomAlphaOfLength(10), -1)); + IndexShardTestCase.updateRoutingEntry(primarySource, + primarySource.routingEntry().relocate(randomAlphaOfLength(10), -1)); final IndexShard primaryTarget = newShard(primarySource.routingEntry().getTargetRelocatingShard()); updateMappings(primaryTarget, primarySource.indexSettings().getIndexMetaData()); recoverReplica(primaryTarget, primarySource, true); @@ -1881,7 +1886,8 @@ public void testFailIfIndexNotPresentInRecoverFromStore() throws Exception { assertTrue(ex.getMessage().contains("failed to fetch index version after copying it over")); } - routing = ShardRoutingHelper.moveToUnassigned(routing, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "because I say so")); + routing = ShardRoutingHelper.moveToUnassigned(routing, + new UnassignedInfo(UnassignedInfo.Reason.INDEX_CREATED, "because I say so")); routing = ShardRoutingHelper.initialize(routing, newShard.routingEntry().currentNodeId()); assertTrue("it's already recovering, we should ignore new ones", newShard.ignoreRecoveryAttempt()); try { @@ -2019,7 +2025,8 @@ public void testSearcherWrapperIsUsed() throws IOException { indexDoc(shard, "_doc", "1", "{\"foobar\" : \"bar\"}"); shard.refresh("test"); - Engine.GetResult getResult = 
shard.get(new Engine.Get(false, false, "test", "1", new Term(IdFieldMapper.NAME, Uid.encodeId("1")))); + Engine.GetResult getResult = shard.get(new Engine.Get(false, false, "test", "1", + new Term(IdFieldMapper.NAME, Uid.encodeId("1")))); assertTrue(getResult.exists()); assertNotNull(getResult.searcher()); getResult.release(); @@ -2059,7 +2066,8 @@ public IndexSearcher wrap(IndexSearcher searcher) throws EngineException { search = searcher.searcher().search(new TermQuery(new Term("foobar", "bar")), 10); assertEquals(search.totalHits, 1); } - getResult = newShard.get(new Engine.Get(false, false, "test", "1", new Term(IdFieldMapper.NAME, Uid.encodeId("1")))); + getResult = newShard.get(new Engine.Get(false, false, "test", "1", + new Term(IdFieldMapper.NAME, Uid.encodeId("1")))); assertTrue(getResult.exists()); assertNotNull(getResult.searcher()); // make sure get uses the wrapped reader assertTrue(getResult.searcher().reader() instanceof FieldMaskingReader); @@ -2475,7 +2483,8 @@ public void testRecoverFromLocalShard() throws IOException { } assertThat(requestedMappingUpdates, hasKey("_doc")); - assertThat(requestedMappingUpdates.get("_doc").get().source().string(), equalTo("{\"properties\":{\"foo\":{\"type\":\"text\"}}}")); + assertThat(requestedMappingUpdates.get("_doc").get().source().string(), + equalTo("{\"properties\":{\"foo\":{\"type\":\"text\"}}}")); closeShards(sourceShard, targetShard); } @@ -2819,7 +2828,8 @@ public void testReadSnapshotAndCheckIndexConcurrently() throws Exception { .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), randomFrom("false", "true", "checksum"))) .build(); final IndexShard newShard = newShard(shardRouting, indexShard.shardPath(), indexMetaData, - null, null, indexShard.engineFactory, indexShard.getGlobalCheckpointSyncer(), EMPTY_EVENT_LISTENER); + null, null, indexShard.engineFactory, + indexShard.getGlobalCheckpointSyncer(), EMPTY_EVENT_LISTENER); Store.MetadataSnapshot storeFileMetaDatas = newShard.snapshotStoreMetadata(); assertTrue("at least 2 files, commit and data: " + storeFileMetaDatas.toString(), storeFileMetaDatas.size() > 1); @@ -3192,14 +3202,16 @@ public void testFlushOnInactive() throws Exception { .putMapping("_doc", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") .settings(settings) .primaryTerm(0, 1).build(); - ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId(metaData.getIndex(), 0), "n1", true, ShardRoutingState - .INITIALIZING, RecoverySource.EmptyStoreRecoverySource.INSTANCE); + ShardRouting shardRouting = + TestShardRouting.newShardRouting(new ShardId(metaData.getIndex(), 0), "n1", true, + ShardRoutingState.INITIALIZING, RecoverySource.EmptyStoreRecoverySource.INSTANCE); final ShardId shardId = shardRouting.shardId(); final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir()); ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId); AtomicBoolean markedInactive = new AtomicBoolean(); AtomicReference primaryRef = new AtomicReference<>(); - IndexShard primary = newShard(shardRouting, shardPath, metaData, null, null, new InternalEngineFactory(), () -> { + IndexShard primary = newShard(shardRouting, shardPath, metaData, null, null, + new InternalEngineFactory(), () -> { }, new IndexEventListener() { @Override public void onShardInactive(IndexShard indexShard) { diff --git a/server/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java b/server/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java index 
fda2f8ef7d039..c666c63a4b4e6 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java @@ -43,8 +43,10 @@ public void testLoadShardPath() throws IOException { ShardId shardId = new ShardId("foo", "0xDEADBEEF", 0); Path[] paths = env.availableShardPaths(shardId); Path path = randomFrom(paths); - ShardStateMetaData.FORMAT.write(new ShardStateMetaData(true, "0xDEADBEEF", AllocationId.newInitializing()), path); - ShardPath shardPath = ShardPath.loadShardPath(logger, env, shardId, IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings)); + ShardStateMetaData.FORMAT.write(new ShardStateMetaData(true, "0xDEADBEEF", + AllocationId.newInitializing()), path); + ShardPath shardPath = + ShardPath.loadShardPath(logger, env, shardId, IndexSettingsModule.newIndexSettings(shardId.getIndex(), settings)); assertEquals(path, shardPath.getDataPath()); assertEquals("0xDEADBEEF", shardPath.getShardId().getIndex().getUUID()); assertEquals("foo", shardPath.getShardId().getIndexName()); @@ -87,7 +89,8 @@ public void testFailLoadShardPathIndexUUIDMissmatch() throws IOException { public void testIllegalCustomDataPath() { Index index = new Index("foo", "foo"); final Path path = createTempDir().resolve(index.getUUID()).resolve("0"); - Exception e = expectThrows(IllegalArgumentException.class, () -> new ShardPath(true, path, path, new ShardId(index, 0))); + Exception e = expectThrows(IllegalArgumentException.class, () -> + new ShardPath(true, path, path, new ShardId(index, 0))); assertThat(e.getMessage(), is("shard state path must be different to the data path when using custom data paths")); } diff --git a/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java b/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java index fa269681bbf33..fd507fd7c2fa9 100644 --- a/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java +++ b/server/src/test/java/org/elasticsearch/index/store/CorruptedFileIT.java @@ -146,7 +146,8 @@ public void testCorruptFileAndRecover() throws ExecutionException, InterruptedEx .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "1") .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no checkindex - we corrupt shards on purpose - .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)) // no translog based flush - it might change the .liv / segments.N files + // no translog based flush - it might change the .liv / segments.N files + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)) )); ensureGreen(); disableAllocation("test"); @@ -222,7 +223,8 @@ public void afterIndexShardClosed(ShardId sid, @Nullable IndexShard indexShard, } }; - for (MockIndexEventListener.TestEventListener eventListener : internalCluster().getDataNodeInstances(MockIndexEventListener.TestEventListener.class)) { + for (MockIndexEventListener.TestEventListener eventListener : + internalCluster().getDataNodeInstances(MockIndexEventListener.TestEventListener.class)) { eventListener.setNewDelegate(listener); } try { @@ -230,7 +232,8 @@ public void afterIndexShardClosed(ShardId sid, @Nullable IndexShard indexShard, latch.await(); assertThat(exception, empty()); } finally { - for (MockIndexEventListener.TestEventListener eventListener : 
internalCluster().getDataNodeInstances(MockIndexEventListener.TestEventListener.class)) { + for (MockIndexEventListener.TestEventListener eventListener : + internalCluster().getDataNodeInstances(MockIndexEventListener.TestEventListener.class)) { eventListener.setNewDelegate(null); } } @@ -248,7 +251,8 @@ public void testCorruptPrimaryNoReplica() throws ExecutionException, Interrupted .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0") .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no checkindex - we corrupt shards on purpose - .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)) // no translog based flush - it might change the .liv / segments.N files + // no translog based flush - it might change the .liv / segments.N files + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)) )); ensureGreen(); IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; @@ -284,7 +288,8 @@ public void testCorruptPrimaryNoReplica() throws ExecutionException, Interrupted } assertThat(response.getStatus(), is(ClusterHealthStatus.RED)); ClusterState state = client().admin().cluster().prepareState().get().getState(); - GroupShardsIterator shardIterators = state.getRoutingTable().activePrimaryShardsGrouped(new String[]{"test"}, false); + GroupShardsIterator shardIterators = + state.getRoutingTable().activePrimaryShardsGrouped(new String[]{"test"}, false); for (ShardIterator iterator : shardIterators) { ShardRouting routing; while ((routing = iterator.nextOrNull()) != null) { @@ -338,8 +343,10 @@ public void testCorruptionOnNetworkLayerFinalizingRecovery() throws ExecutionExc final AtomicBoolean corrupt = new AtomicBoolean(true); final CountDownLatch hasCorrupted = new CountDownLatch(1); for (NodeStats dataNode : dataNodeStats) { - MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().getName())); - mockTransportService.addSendBehavior(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()), (connection, requestId, action, request, options) -> { + MockTransportService mockTransportService = + ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().getName())); + mockTransportService.addSendBehavior(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()), + (connection, requestId, action, request, options) -> { if (corrupt.get() && action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) { RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request; byte[] array = BytesRef.deepCopyOf(req.content().toBytesRef()).bytes; @@ -353,7 +360,8 @@ public void testCorruptionOnNetworkLayerFinalizingRecovery() throws ExecutionExc Settings build = Settings.builder() .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "1") - .put("index.routing.allocation.include._name", primariesNode.getNode().getName() + "," + unluckyNode.getNode().getName()).build(); + .put("index.routing.allocation.include._name", + primariesNode.getNode().getName() + "," + unluckyNode.getNode().getName()).build(); client().admin().indices().prepareUpdateSettings("test").setSettings(build).get(); client().admin().cluster().prepareReroute().get(); hasCorrupted.await(); @@ -369,7 +377,9 @@ public void testCorruptionOnNetworkLayer() throws ExecutionException, 
Interrupte int numDocs = scaledRandomIntBetween(100, 1000); internalCluster().ensureAtLeastNumDataNodes(2); if (cluster().numDataNodes() < 3) { - internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false)); + internalCluster().startNode(Settings.builder() + .put(Node.NODE_DATA_SETTING.getKey(), true) + .put(Node.NODE_MASTER_SETTING.getKey(), false)); } NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get(); List dataNodeStats = new ArrayList<>(); @@ -406,14 +416,17 @@ public void testCorruptionOnNetworkLayer() throws ExecutionException, Interrupte assertHitCount(countResponse, numDocs); final boolean truncate = randomBoolean(); for (NodeStats dataNode : dataNodeStats) { - MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().getName())); - mockTransportService.addSendBehavior(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()), (connection, requestId, action, request, options) -> { + MockTransportService mockTransportService = + ((MockTransportService) internalCluster().getInstance(TransportService.class, dataNode.getNode().getName())); + mockTransportService.addSendBehavior(internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()), + (connection, requestId, action, request, options) -> { if (action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) { RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request; if (truncate && req.length() > 1) { BytesRef bytesRef = req.content().toBytesRef(); BytesArray array = new BytesArray(bytesRef.bytes, bytesRef.offset, (int) req.length() - 1); - request = new RecoveryFileChunkRequest(req.recoveryId(), req.shardId(), req.metadata(), req.position(), array, req.lastChunk(), req.totalTranslogOps(), req.sourceThrottleTimeInNanos()); + request = new RecoveryFileChunkRequest(req.recoveryId(), req.shardId(), req.metadata(), req.position(), + array, req.lastChunk(), req.totalTranslogOps(), req.sourceThrottleTimeInNanos()); } else { assert req.content().toBytesRef().bytes == req.content().toBytesRef().bytes : "no internal reference!!"; final byte[] array = req.content().toBytesRef().bytes; @@ -466,12 +479,16 @@ public void testCorruptFileThenSnapshotAndRestore() throws ExecutionException, I int numDocs = scaledRandomIntBetween(100, 1000); internalCluster().ensureAtLeastNumDataNodes(2); - assertAcked(prepareCreate("test").setSettings(Settings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0") // no replicas for this test - .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) - .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) // no checkindex - we corrupt shards on purpose - .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)) // no translog based flush - it might change the .liv / segments.N files - )); + assertAcked( + prepareCreate("test").setSettings(Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0") // no replicas for this test + .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false) + // no checkindex - we corrupt shards on purpose + .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false) + // no translog based flush - it might change the .liv / segments.N files + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, 
ByteSizeUnit.PB)) + ) + ); ensureGreen(); IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < builders.length; i++) { @@ -495,7 +512,8 @@ public void testCorruptFileThenSnapshotAndRestore() throws ExecutionException, I .put("compress", randomBoolean()) .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))); logger.info("--> snapshot"); - CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test").get(); + CreateSnapshotResponse createSnapshotResponse = client().admin().cluster() + .prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test").get(); assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.PARTIAL)); logger.info("failed during snapshot -- maybe SI file got corrupted"); final List files = listShardFiles(shardRouting); @@ -618,7 +636,8 @@ private ShardRouting corruptRandomPrimaryFile(final boolean includePerCommitFile Set files = new TreeSet<>(); // treeset makes sure iteration order is deterministic for (FsInfo.Path info : nodeStatses.getNodes().get(0).getFs()) { String path = info.getPath(); - Path file = PathUtils.get(path).resolve("indices").resolve(test.getUUID()).resolve(Integer.toString(shardRouting.getId())).resolve("index"); + Path file = PathUtils.get(path) + .resolve("indices").resolve(test.getUUID()).resolve(Integer.toString(shardRouting.getId())).resolve("index"); if (Files.exists(file)) { // multi data path might only have one path in use try (DirectoryStream stream = Files.newDirectoryStream(file)) { for (Path item : stream) { @@ -661,8 +680,16 @@ private void pruneOldDeleteGenerations(Set files) { final String newSegmentName = IndexFileNames.parseSegmentName(current.getFileName().toString()); final String oldSegmentName = IndexFileNames.parseSegmentName(last.getFileName().toString()); if (newSegmentName.equals(oldSegmentName)) { - int oldGen = Integer.parseInt(IndexFileNames.stripExtension(IndexFileNames.stripSegmentName(last.getFileName().toString())).replace("_", ""), Character.MAX_RADIX); - int newGen = Integer.parseInt(IndexFileNames.stripExtension(IndexFileNames.stripSegmentName(current.getFileName().toString())).replace("_", ""), Character.MAX_RADIX); + int oldGen = + Integer.parseInt( + IndexFileNames.stripExtension( + IndexFileNames.stripSegmentName(last.getFileName().toString())).replace("_", ""), + Character.MAX_RADIX + ); + int newGen = Integer.parseInt( + IndexFileNames.stripExtension( + IndexFileNames.stripSegmentName(current.getFileName().toString())).replace("_", ""), + Character.MAX_RADIX); if (newGen > oldGen) { files.remove(last); } else { diff --git a/server/src/test/java/org/elasticsearch/index/store/CorruptedTranslogIT.java b/server/src/test/java/org/elasticsearch/index/store/CorruptedTranslogIT.java index 71284792a6817..7936e8efd5624 100644 --- a/server/src/test/java/org/elasticsearch/index/store/CorruptedTranslogIT.java +++ b/server/src/test/java/org/elasticsearch/index/store/CorruptedTranslogIT.java @@ -91,7 +91,8 @@ public void testCorruptTranslogFiles() throws Exception { // Restart the single node internalCluster().fullRestart(); - client().admin().cluster().prepareHealth().setWaitForYellowStatus().setTimeout(new TimeValue(1000, TimeUnit.MILLISECONDS)).setWaitForEvents(Priority.LANGUID).get(); + client().admin().cluster().prepareHealth().setWaitForYellowStatus(). 
+ setTimeout(new TimeValue(1000, TimeUnit.MILLISECONDS)).setWaitForEvents(Priority.LANGUID).get(); try { client().prepareSearch("test").setQuery(matchAllQuery()).get(); @@ -130,13 +131,15 @@ private void corruptRandomTranslogFile() throws IOException { /** Disables translog flushing for the specified index */ private static void disableTranslogFlush(String index) { - Settings settings = Settings.builder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)).build(); + Settings settings = Settings.builder() + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB)).build(); client().admin().indices().prepareUpdateSettings(index).setSettings(settings).get(); } /** Enables translog flushing for the specified index */ private static void enableTranslogFlush(String index) { - Settings settings = Settings.builder().put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(512, ByteSizeUnit.MB)).build(); + Settings settings = Settings.builder() + .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(512, ByteSizeUnit.MB)).build(); client().admin().indices().prepareUpdateSettings(index).setSettings(settings).get(); } } diff --git a/server/src/test/java/org/elasticsearch/index/store/IndexStoreTests.java b/server/src/test/java/org/elasticsearch/index/store/IndexStoreTests.java index 6fa32df288512..155473c83cf30 100644 --- a/server/src/test/java/org/elasticsearch/index/store/IndexStoreTests.java +++ b/server/src/test/java/org/elasticsearch/index/store/IndexStoreTests.java @@ -60,7 +60,8 @@ private void doTestStoreDirectory(Index index, Path tempDir, String typeSettingV } Settings settings = settingsBuilder.build(); IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("foo", settings); - FsDirectoryService service = new FsDirectoryService(indexSettings, null, new ShardPath(false, tempDir, tempDir, new ShardId(index, 0))); + FsDirectoryService service = new FsDirectoryService(indexSettings, null, + new ShardPath(false, tempDir, tempDir, new ShardId(index, 0))); try (Directory directory = service.newFSDirectory(tempDir, NoLockFactory.INSTANCE)) { switch (type) { case NIOFS: diff --git a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java index ca1832d1489fe..5ff6a9b7b1007 100644 --- a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java +++ b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java @@ -207,9 +207,10 @@ public void testVerifyingIndexOutput() throws IOException { public void testVerifyingIndexOutputOnEmptyFile() throws IOException { Directory dir = newDirectory(); - IndexOutput verifyingOutput = new Store.LuceneVerifyingIndexOutput(new StoreFileMetaData("foo.bar", 0, Store.digestToString(0), - MIN_SUPPORTED_LUCENE_VERSION), - dir.createOutput("foo1.bar", IOContext.DEFAULT)); + IndexOutput verifyingOutput = + new Store.LuceneVerifyingIndexOutput(new StoreFileMetaData("foo.bar", 0, Store.digestToString(0), + MIN_SUPPORTED_LUCENE_VERSION), + dir.createOutput("foo1.bar", IOContext.DEFAULT)); try { Store.verify(verifyingOutput); fail("should be a corrupted index"); @@ -310,13 +311,15 @@ public void testNewChecksums() throws IOException { final ShardId shardId = new ShardId("index", "_na_", 1); Store store = new Store(shardId, INDEX_SETTINGS, StoreTests.newDirectory(random()), new 
DummyShardLock(shardId)); // set default codec - all segments need checksums - IndexWriter writer = new IndexWriter(store.directory(), newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec())); + IndexWriter writer = new IndexWriter(store.directory(), newIndexWriterConfig(random(), + new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec())); int docs = 1 + random().nextInt(100); for (int i = 0; i < docs; i++) { Document doc = new Document(); doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); - doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); + doc.add(new TextField("body", + TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random())))); writer.addDocument(doc); } @@ -325,7 +328,8 @@ public void testNewChecksums() throws IOException { if (random().nextBoolean()) { Document doc = new Document(); doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); - doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); + doc.add(new TextField("body", + TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); writer.updateDocument(new Term("id", "" + i), doc); } } @@ -475,10 +479,13 @@ public void assertDeleteContent(Store store, Directory dir) throws IOException { public static void assertConsistent(Store store, Store.MetadataSnapshot metadata) throws IOException { for (String file : store.directory().listAll()) { - if (!IndexWriter.WRITE_LOCK_NAME.equals(file) && !IndexFileNames.OLD_SEGMENTS_GEN.equals(file) && file.startsWith("extra") == false) { - assertTrue(file + " is not in the map: " + metadata.asMap().size() + " vs. " + store.directory().listAll().length, metadata.asMap().containsKey(file)); + if (!IndexWriter.WRITE_LOCK_NAME.equals(file) && + !IndexFileNames.OLD_SEGMENTS_GEN.equals(file) && file.startsWith("extra") == false) { + assertTrue(file + " is not in the map: " + metadata.asMap().size() + " vs. " + + store.directory().listAll().length, metadata.asMap().containsKey(file)); } else { - assertFalse(file + " is not in the map: " + metadata.asMap().size() + " vs. " + store.directory().listAll().length, metadata.asMap().containsKey(file)); + assertFalse(file + " is not in the map: " + metadata.asMap().size() + " vs. " + + store.directory().listAll().length, metadata.asMap().containsKey(file)); } } } @@ -489,7 +496,8 @@ public void testRecoveryDiff() throws IOException, InterruptedException { for (int i = 0; i < numDocs; i++) { Document doc = new Document(); doc.add(new StringField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); - doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); + doc.add(new TextField("body", + TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? 
Field.Store.YES : Field.Store.NO)); doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random())))); docs.add(doc); } @@ -611,14 +619,17 @@ public void testRecoveryDiff() throws IOException, InterruptedException { Store.MetadataSnapshot newCommitMetaData = store.getMetadata(null); Store.RecoveryDiff newCommitDiff = newCommitMetaData.recoveryDiff(metadata); if (delFile != null) { - assertThat(newCommitDiff.identical.size(), equalTo(newCommitMetaData.size() - 5)); // segments_N, del file, cfs, cfe, si for the new segment + assertThat(newCommitDiff.identical.size(), + equalTo(newCommitMetaData.size() - 5)); // segments_N, del file, cfs, cfe, si for the new segment assertThat(newCommitDiff.different.size(), equalTo(1)); // the del file must be different assertThat(newCommitDiff.different.get(0).name(), endsWith(".liv")); assertThat(newCommitDiff.missing.size(), equalTo(4)); // segments_N,cfs, cfe, si for the new segment } else { - assertThat(newCommitDiff.identical.size(), equalTo(newCommitMetaData.size() - 4)); // segments_N, cfs, cfe, si for the new segment + assertThat(newCommitDiff.identical.size(), + equalTo(newCommitMetaData.size() - 4)); // segments_N, cfs, cfe, si for the new segment assertThat(newCommitDiff.different.size(), equalTo(0)); - assertThat(newCommitDiff.missing.size(), equalTo(4)); // an entire segment must be missing (single doc segment got dropped) plus the commit is different + assertThat(newCommitDiff.missing.size(), + equalTo(4)); // an entire segment must be missing (single doc segment got dropped) plus the commit is different } deleteContent(store.directory()); @@ -629,7 +640,8 @@ public void testCleanupFromSnapshot() throws IOException { final ShardId shardId = new ShardId("index", "_na_", 1); Store store = new Store(shardId, INDEX_SETTINGS, StoreTests.newDirectory(random()), new DummyShardLock(shardId)); // this time random codec.... - IndexWriterConfig indexWriterConfig = newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec()); + IndexWriterConfig indexWriterConfig = + newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec()); // we keep all commits and that allows us clean based on multiple snapshots indexWriterConfig.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE); IndexWriter writer = new IndexWriter(store.directory(), indexWriterConfig); @@ -642,7 +654,8 @@ public void testCleanupFromSnapshot() throws IOException { } Document doc = new Document(); doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); - doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); + doc.add(new TextField("body", + TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random())))); writer.addDocument(doc); @@ -651,7 +664,8 @@ public void testCleanupFromSnapshot() throws IOException { writer.commit(); Document doc = new Document(); doc.add(new TextField("id", "" + docs++, random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); - doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); + doc.add(new TextField("body", + TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? 
Field.Store.YES : Field.Store.NO)); doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random())))); writer.addDocument(doc); } @@ -663,7 +677,8 @@ public void testCleanupFromSnapshot() throws IOException { if (random().nextBoolean()) { Document doc = new Document(); doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); - doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); + doc.add(new TextField("body", + TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); writer.updateDocument(new Term("id", "" + i), doc); } } @@ -713,7 +728,8 @@ public void testCleanupFromSnapshot() throws IOException { } public void testOnCloseCallback() throws IOException { - final ShardId shardId = new ShardId(new Index(randomRealisticUnicodeOfCodepointLengthBetween(1, 10), "_na_"), randomIntBetween(0, 100)); + final ShardId shardId = + new ShardId(new Index(randomRealisticUnicodeOfCodepointLengthBetween(1, 10), "_na_"), randomIntBetween(0, 100)); final AtomicInteger count = new AtomicInteger(0); final ShardLock lock = new DummyShardLock(shardId); @@ -815,8 +831,10 @@ public void testMetadataSnapshotStreaming() throws Exception { } protected Store.MetadataSnapshot createMetaDataSnapshot() { - StoreFileMetaData storeFileMetaData1 = new StoreFileMetaData("segments", 1, "666", MIN_SUPPORTED_LUCENE_VERSION); - StoreFileMetaData storeFileMetaData2 = new StoreFileMetaData("no_segments", 1, "666", MIN_SUPPORTED_LUCENE_VERSION); + StoreFileMetaData storeFileMetaData1 = + new StoreFileMetaData("segments", 1, "666", MIN_SUPPORTED_LUCENE_VERSION); + StoreFileMetaData storeFileMetaData2 = + new StoreFileMetaData("no_segments", 1, "666", MIN_SUPPORTED_LUCENE_VERSION); Map storeFileMetaDataMap = new HashMap<>(); storeFileMetaDataMap.put(storeFileMetaData1.name(), storeFileMetaData1); storeFileMetaDataMap.put(storeFileMetaData2.name(), storeFileMetaData2); @@ -857,7 +875,9 @@ public void testUserDataRead() throws IOException { public void testStreamStoreFilesMetaData() throws Exception { Store.MetadataSnapshot metadataSnapshot = createMetaDataSnapshot(); - TransportNodesListShardStoreMetaData.StoreFilesMetaData outStoreFileMetaData = new TransportNodesListShardStoreMetaData.StoreFilesMetaData(new ShardId("test", "_na_", 0),metadataSnapshot); + TransportNodesListShardStoreMetaData.StoreFilesMetaData outStoreFileMetaData = + new TransportNodesListShardStoreMetaData.StoreFilesMetaData(new ShardId("test", "_na_", 0), + metadataSnapshot); ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); org.elasticsearch.Version targetNodeVersion = randomVersion(random()); @@ -866,7 +886,8 @@ public void testStreamStoreFilesMetaData() throws Exception { ByteArrayInputStream inBuffer = new ByteArrayInputStream(outBuffer.toByteArray()); InputStreamStreamInput in = new InputStreamStreamInput(inBuffer); in.setVersion(targetNodeVersion); - TransportNodesListShardStoreMetaData.StoreFilesMetaData inStoreFileMetaData = TransportNodesListShardStoreMetaData.StoreFilesMetaData.readStoreFilesMetaData(in); + TransportNodesListShardStoreMetaData.StoreFilesMetaData inStoreFileMetaData = + TransportNodesListShardStoreMetaData.StoreFilesMetaData.readStoreFilesMetaData(in); Iterator outFiles = outStoreFileMetaData.iterator(); for (StoreFileMetaData inFile : 
inStoreFileMetaData) { assertThat(inFile.name(), equalTo(outFiles.next().name())); @@ -885,7 +906,8 @@ public void testMarkCorruptedOnTruncatedSegmentsFile() throws IOException { for (int i = 0; i < numDocs; i++) { Document doc = new Document(); doc.add(new StringField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); - doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); + doc.add(new TextField("body", + TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO)); doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random())))); docs.add(doc); } @@ -911,7 +933,8 @@ public void testMarkCorruptedOnTruncatedSegmentsFile() throws IOException { // expected } assertTrue(store.isMarkedCorrupted()); - Lucene.cleanLuceneIndex(store.directory()); // we have to remove the index since it's corrupted and might fail the MocKDirWrapper checkindex call + // we have to remove the index since it's corrupted and might fail the MocKDirWrapper checkindex call + Lucene.cleanLuceneIndex(store.directory()); store.close(); } @@ -980,7 +1003,8 @@ public void testCanReadOldCorruptionMarker() throws IOException { store.failIfCorrupted(); fail("should be corrupted"); } catch (CorruptIndexException e) { - assertTrue(e.getMessage().startsWith("[index][1] Preexisting corrupted index [" + uuid +"] caused by: CorruptIndexException[foo (resource=bar)]")); + assertTrue(e.getMessage().startsWith("[index][1] Preexisting corrupted index [" + uuid + + "] caused by: CorruptIndexException[foo (resource=bar)]")); assertTrue(e.getMessage().contains(ExceptionsHelper.stackTrace(exception))); } @@ -995,7 +1019,8 @@ public void testCanReadOldCorruptionMarker() throws IOException { store.failIfCorrupted(); fail("should be corrupted"); } catch (CorruptIndexException e) { - assertTrue(e.getMessage().startsWith("[index][1] Preexisting corrupted index [" + uuid + "] caused by: CorruptIndexException[foo (resource=bar)]")); + assertTrue(e.getMessage().startsWith("[index][1] Preexisting corrupted index [" + uuid + + "] caused by: CorruptIndexException[foo (resource=bar)]")); assertFalse(e.getMessage().contains(ExceptionsHelper.stackTrace(exception))); } diff --git a/server/src/test/java/org/elasticsearch/index/suggest/stats/SuggestStatsIT.java b/server/src/test/java/org/elasticsearch/index/suggest/stats/SuggestStatsIT.java index 7b7e7a41783a3..c355725f62acd 100644 --- a/server/src/test/java/org/elasticsearch/index/suggest/stats/SuggestStatsIT.java +++ b/server/src/test/java/org/elasticsearch/index/suggest/stats/SuggestStatsIT.java @@ -106,9 +106,12 @@ public void testSimpleStats() throws Exception { assertThat(suggest.getSuggestCurrent(), equalTo(0L)); // check suggest count - assertThat(suggest.getSuggestCount(), equalTo((long) (suggestAllIdx * totalShards + suggestIdx1 * shardsIdx1 + suggestIdx2 * shardsIdx2))); - assertThat(indicesStats.getIndices().get("test1").getTotal().getSearch().getTotal().getSuggestCount(), equalTo((long) ((suggestAllIdx + suggestIdx1) * shardsIdx1))); - assertThat(indicesStats.getIndices().get("test2").getTotal().getSearch().getTotal().getSuggestCount(), equalTo((long) ((suggestAllIdx + suggestIdx2) * shardsIdx2))); + assertThat(suggest.getSuggestCount(), + equalTo((long) (suggestAllIdx * totalShards + suggestIdx1 * shardsIdx1 + suggestIdx2 * shardsIdx2))); + 
assertThat(indicesStats.getIndices().get("test1").getTotal().getSearch().getTotal().getSuggestCount(), + equalTo((long) ((suggestAllIdx + suggestIdx1) * shardsIdx1))); + assertThat(indicesStats.getIndices().get("test2").getTotal().getSearch().getTotal().getSuggestCount(), + equalTo((long) ((suggestAllIdx + suggestIdx2) * shardsIdx2))); logger.info("iter {}, iter1 {}, iter2 {}, {}", suggestAllIdx, suggestIdx1, suggestIdx2, endTime - startTime); // check suggest time diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index da94cf1706485..3d061c3e9f883 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -524,7 +524,8 @@ public void testUncommittedOperations() throws Exception { } public void testTotalTests() { - final TranslogStats total = new TranslogStats(0, 0, 0, 0, 1); + final TranslogStats total = + new TranslogStats(0, 0, 0, 0, 1); final int n = randomIntBetween(0, 16); final List statsList = new ArrayList<>(n); for (int i = 0; i < n; i++) { @@ -552,21 +553,27 @@ public void testTotalTests() { } public void testNegativeNumberOfOperations() { - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(-1, 1, 1, 1, 1)); + IllegalArgumentException e = + expectThrows(IllegalArgumentException.class, + () -> new TranslogStats(-1, 1, 1, 1, 1)); assertThat(e, hasToString(containsString("numberOfOperations must be >= 0"))); - e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(1, 1, -1, 1, 1)); + e = expectThrows(IllegalArgumentException.class, + () -> new TranslogStats(1, 1, -1, 1, 1)); assertThat(e, hasToString(containsString("uncommittedOperations must be >= 0"))); } public void testNegativeSizeInBytes() { - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(1, -1, 1, 1, 1)); + IllegalArgumentException e = + expectThrows(IllegalArgumentException.class, () -> new TranslogStats(1, -1, 1, 1, 1)); assertThat(e, hasToString(containsString("translogSizeInBytes must be >= 0"))); - e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(1, 1, 1, -1, 1)); + e = expectThrows(IllegalArgumentException.class, + () -> new TranslogStats(1, 1, 1, -1, 1)); assertThat(e, hasToString(containsString("uncommittedSizeInBytes must be >= 0"))); } public void testOldestEntryInSeconds() { - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new TranslogStats(1, 1, 1, 1, -1)); + IllegalArgumentException e = + expectThrows(IllegalArgumentException.class, () -> new TranslogStats(1, 1, 1, 1, -1)); assertThat(e, hasToString(containsString("earliestLastModifiedAge must be >= 0"))); } @@ -691,7 +698,8 @@ public void testSeqNoFilterSnapshot() throws Exception { List batch = LongStream.rangeClosed(0, between(0, 100)).boxed().collect(Collectors.toList()); Randomness.shuffle(batch); for (long seqNo : batch) { - Translog.Index op = new Translog.Index("doc", randomAlphaOfLength(10), seqNo, primaryTerm.get(), new byte[]{1}); + Translog.Index op = + new Translog.Index("doc", randomAlphaOfLength(10), seqNo, primaryTerm.get(), new byte[]{1}); translog.add(op); } translog.rollGeneration(); @@ -771,7 +779,8 @@ public void testConcurrentWritesWithVaryingSize() throws Throwable { final CountDownLatch downLatch = new CountDownLatch(1); for (int i = 0; i < 
threadCount; i++) { final int threadId = i; - threads[i] = new TranslogThread(translog, downLatch, opsPerThread, threadId, writtenOperations, seqNoGenerator, threadExceptions); + threads[i] = + new TranslogThread(translog, downLatch, opsPerThread, threadId, writtenOperations, seqNoGenerator, threadExceptions); threads[i].setDaemon(true); threads[i].start(); } @@ -838,7 +847,9 @@ public void testTranslogCorruption() throws Exception { int translogOperations = randomIntBetween(10, 100); for (int op = 0; op < translogOperations; op++) { String ascii = randomAlphaOfLengthBetween(1, 50); - locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), ascii.getBytes("UTF-8")))); + locations.add( + translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), ascii.getBytes("UTF-8"))) + ); } translog.close(); @@ -865,7 +876,9 @@ public void testTruncatedTranslogs() throws Exception { int translogOperations = randomIntBetween(10, 100); for (int op = 0; op < translogOperations; op++) { String ascii = randomAlphaOfLengthBetween(1, 50); - locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), ascii.getBytes("UTF-8")))); + locations.add(translog.add( + new Translog.Index("test", "" + op, op, primaryTerm.get(), ascii.getBytes("UTF-8"))) + ); } translog.sync(); @@ -1124,13 +1137,16 @@ public void testSyncUpTo() throws IOException { for (int op = 0; op < translogOperations; op++) { int seqNo = ++count; final Translog.Location location = - translog.add(new Translog.Index("test", "" + op, seqNo, primaryTerm.get(), Integer.toString(seqNo).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + op, seqNo, primaryTerm.get(), + Integer.toString(seqNo).getBytes(Charset.forName("UTF-8")))); if (randomBoolean()) { assertTrue("at least one operation pending", translog.syncNeeded()); assertTrue("this operation has not been synced", translog.ensureSynced(location)); - assertFalse("the last call to ensureSycned synced all previous ops", translog.syncNeeded()); // we are the last location so everything should be synced + // we are the last location so everything should be synced + assertFalse("the last call to ensureSycned synced all previous ops", translog.syncNeeded()); seqNo = ++count; - translog.add(new Translog.Index("test", "" + op, seqNo, primaryTerm.get(), Integer.toString(seqNo).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + op, seqNo, primaryTerm.get(), + Integer.toString(seqNo).getBytes(Charset.forName("UTF-8")))); assertTrue("one pending operation", translog.syncNeeded()); assertFalse("this op has been synced before", translog.ensureSynced(location)); // not syncing now assertTrue("we only synced a previous operation yet", translog.syncNeeded()); @@ -1159,17 +1175,20 @@ public void testSyncUpToStream() throws IOException { rollAndCommit(translog); // do this first so that there is at least one pending tlog entry } final Translog.Location location = - translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(++count).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), + Integer.toString(++count).getBytes(Charset.forName("UTF-8")))); locations.add(location); } Collections.shuffle(locations, random()); if (randomBoolean()) { assertTrue("at least one operation pending", translog.syncNeeded()); assertTrue("this operation has not been synced", translog.ensureSynced(locations.stream())); - 
assertFalse("the last call to ensureSycned synced all previous ops", translog.syncNeeded()); // we are the last location so everything should be synced + // we are the last location so everything should be synced + assertFalse("the last call to ensureSycned synced all previous ops", translog.syncNeeded()); } else if (rarely()) { rollAndCommit(translog); - assertFalse("location is from a previous translog - already synced", translog.ensureSynced(locations.stream())); // not syncing now + // not syncing now + assertFalse("location is from a previous translog - already synced", translog.ensureSynced(locations.stream())); assertFalse("no sync needed since no operations in current translog", translog.syncNeeded()); } else { translog.sync(); @@ -1187,7 +1206,8 @@ public void testLocationComparison() throws IOException { int count = 0; for (int op = 0; op < translogOperations; op++) { locations.add( - translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(++count).getBytes(Charset.forName("UTF-8"))))); + translog.add(new Translog.Index("test", "" + op, op, + primaryTerm.get(), Integer.toString(++count).getBytes(Charset.forName("UTF-8"))))); if (rarely() && translogOperations > op + 1) { rollAndCommit(translog); } @@ -1224,7 +1244,8 @@ public void testBasicCheckpoint() throws IOException { int lastSynced = -1; long lastSyncedGlobalCheckpoint = globalCheckpoint.get(); for (int op = 0; op < translogOperations; op++) { - locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); + locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), + Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); if (randomBoolean()) { globalCheckpoint.set(globalCheckpoint.get() + randomIntBetween(1, 16)); } @@ -1239,7 +1260,8 @@ public void testBasicCheckpoint() throws IOException { Integer.toString(translogOperations).getBytes(Charset.forName("UTF-8")))); final Checkpoint checkpoint = Checkpoint.read(translog.location().resolve(Translog.CHECKPOINT_FILE_NAME)); - try (TranslogReader reader = translog.openReader(translog.location().resolve(Translog.getFilename(translog.currentFileGeneration())), checkpoint)) { + try (TranslogReader reader = + translog.openReader(translog.location().resolve(Translog.getFilename(translog.currentFileGeneration())), checkpoint)) { assertEquals(lastSynced + 1, reader.totalOperations()); TranslogSnapshot snapshot = reader.newSnapshot(); @@ -1282,7 +1304,8 @@ public void testTranslogWriter() throws IOException { } writer.sync(); - final BaseTranslogReader reader = randomBoolean() ? writer : translog.openReader(writer.path(), Checkpoint.read(translog.location().resolve(Translog.CHECKPOINT_FILE_NAME))); + final BaseTranslogReader reader = randomBoolean() ? 
writer : + translog.openReader(writer.path(), Checkpoint.read(translog.location().resolve(Translog.CHECKPOINT_FILE_NAME))); for (int i = 0; i < numOps; i++) { ByteBuffer buffer = ByteBuffer.allocate(4); reader.readBytes(buffer, reader.getFirstOperationOffset() + 4 * i); @@ -1360,7 +1383,8 @@ public void testBasicRecovery() throws IOException { int minUncommittedOp = -1; final boolean commitOften = randomBoolean(); for (int op = 0; op < translogOperations; op++) { - locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); + locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), + Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); final boolean commit = commitOften ? frequently() : rarely(); if (commit && op < translogOperations - 1) { rollAndCommit(translog); @@ -1381,8 +1405,10 @@ public void testBasicRecovery() throws IOException { assertNull(snapshot.next()); } } else { - translog = new Translog(config, translogGeneration.translogUUID, translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); - assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, translog.currentFileGeneration()); + translog = new Translog(config, translogGeneration.translogUUID, translog.getDeletionPolicy(), + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); + assertEquals("lastCommitted must be 1 less than current", + translogGeneration.translogFileGeneration + 1, translog.currentFileGeneration()); assertFalse(translog.syncNeeded()); try (Translog.Snapshot snapshot = translog.newSnapshotFromGen(translogGeneration, Long.MAX_VALUE)) { for (int i = minUncommittedOp; i < translogOperations; i++) { @@ -1403,7 +1429,8 @@ public void testRecoveryUncommitted() throws IOException { Translog.TranslogGeneration translogGeneration = null; final boolean sync = randomBoolean(); for (int op = 0; op < translogOperations; op++) { - locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); + locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), + Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); if (op == prepareOp) { translogGeneration = translog.getGeneration(); translog.rollGeneration(); @@ -1420,9 +1447,11 @@ public void testRecoveryUncommitted() throws IOException { TranslogConfig config = translog.getConfig(); final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); - try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { + try (Translog translog = new Translog(config, translogUUID, deletionPolicy, + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { assertNotNull(translogGeneration); - assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration()); + assertEquals("lastCommitted must be 2 less than current - we never finished the commit", + translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration()); assertFalse(translog.syncNeeded()); try (Translog.Snapshot snapshot = new SortedSnapshot(translog.newSnapshot())) { int upTo = sync ? 
translogOperations : prepareOp; @@ -1434,7 +1463,8 @@ public void testRecoveryUncommitted() throws IOException { } } if (randomBoolean()) { // recover twice - try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { + try (Translog translog = new Translog(config, translogUUID, deletionPolicy, + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { assertNotNull(translogGeneration); assertEquals("lastCommitted must be 3 less than current - we never finished the commit and run recovery twice", translogGeneration.translogFileGeneration + 3, translog.currentFileGeneration()); @@ -1444,7 +1474,8 @@ public void testRecoveryUncommitted() throws IOException { for (int i = 0; i < upTo; i++) { Translog.Operation next = snapshot.next(); assertNotNull("operation " + i + " must be non-null synced: " + sync, next); - assertEquals("payload mismatch, synced: " + sync, i, Integer.parseInt(next.getSource().source.utf8ToString())); + assertEquals("payload mismatch, synced: " + sync, i, + Integer.parseInt(next.getSource().source.utf8ToString())); } } } @@ -1459,7 +1490,8 @@ public void testRecoveryUncommittedFileExists() throws IOException { Translog.TranslogGeneration translogGeneration = null; final boolean sync = randomBoolean(); for (int op = 0; op < translogOperations; op++) { - locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); + locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), + Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); if (op == prepareOp) { translogGeneration = translog.getGeneration(); translog.rollGeneration(); @@ -1480,9 +1512,11 @@ public void testRecoveryUncommittedFileExists() throws IOException { final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); - try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { + try (Translog translog = new Translog(config, translogUUID, deletionPolicy, + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { assertNotNull(translogGeneration); - assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration()); + assertEquals("lastCommitted must be 2 less than current - we never finished the commit", + translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration()); assertFalse(translog.syncNeeded()); try (Translog.Snapshot snapshot = new SortedSnapshot(translog.newSnapshot())) { int upTo = sync ? 
translogOperations : prepareOp; @@ -1495,7 +1529,8 @@ public void testRecoveryUncommittedFileExists() throws IOException { } if (randomBoolean()) { // recover twice - try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { + try (Translog translog = new Translog(config, translogUUID, deletionPolicy, + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { assertNotNull(translogGeneration); assertEquals("lastCommitted must be 3 less than current - we never finished the commit and run recovery twice", translogGeneration.translogFileGeneration + 3, translog.currentFileGeneration()); @@ -1505,7 +1540,8 @@ public void testRecoveryUncommittedFileExists() throws IOException { for (int i = 0; i < upTo; i++) { Translog.Operation next = snapshot.next(); assertNotNull("operation " + i + " must be non-null synced: " + sync, next); - assertEquals("payload mismatch, synced: " + sync, i, Integer.parseInt(next.getSource().source.utf8ToString())); + assertEquals("payload mismatch, synced: " + sync, i, + Integer.parseInt(next.getSource().source.utf8ToString())); } } } @@ -1519,7 +1555,8 @@ public void testRecoveryUncommittedCorruptedCheckpoint() throws IOException { Translog.TranslogGeneration translogGeneration = null; final boolean sync = randomBoolean(); for (int op = 0; op < translogOperations; op++) { - locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); + locations.add(translog.add(new Translog.Index("test", "" + op, op, + primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); if (op == prepareOp) { translogGeneration = translog.getGeneration(); translog.rollGeneration(); @@ -1534,21 +1571,28 @@ public void testRecoveryUncommittedCorruptedCheckpoint() throws IOException { TranslogConfig config = translog.getConfig(); Path ckp = config.getTranslogPath().resolve(Translog.CHECKPOINT_FILE_NAME); Checkpoint read = Checkpoint.read(ckp); - Checkpoint corrupted = Checkpoint.emptyTranslogCheckpoint(0, 0, SequenceNumbers.NO_OPS_PERFORMED, 0); - Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)), corrupted, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW); + Checkpoint corrupted = Checkpoint.emptyTranslogCheckpoint(0, 0, + SequenceNumbers.NO_OPS_PERFORMED, 0); + Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)), + corrupted, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW); final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); - try (Translog ignored = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { + try (Translog ignored = new Translog(config, translogUUID, deletionPolicy, + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { fail("corrupted"); } catch (IllegalStateException ex) { assertEquals("Checkpoint file translog-3.ckp already exists but has corrupted content expected: Checkpoint{offset=3135, " + - "numOps=55, generation=3, minSeqNo=45, maxSeqNo=99, globalCheckpoint=-1, minTranslogGeneration=1, trimmedAboveSeqNo=-2} but got: Checkpoint{offset=0, numOps=0, " + - "generation=0, minSeqNo=-1, maxSeqNo=-1, globalCheckpoint=-1, minTranslogGeneration=0, trimmedAboveSeqNo=-2}", ex.getMessage()); - } - 
Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)), read, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING); - try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { + "numOps=55, generation=3, minSeqNo=45, maxSeqNo=99, globalCheckpoint=-1, minTranslogGeneration=1, trimmedAboveSeqNo=-2}" + + " but got: Checkpoint{offset=0, numOps=0, generation=0, minSeqNo=-1, maxSeqNo=-1, globalCheckpoint=-1," + + " minTranslogGeneration=0, trimmedAboveSeqNo=-2}", ex.getMessage()); + } + Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)), + read, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING); + try (Translog translog = new Translog(config, translogUUID, deletionPolicy, + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { assertNotNull(translogGeneration); - assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration()); + assertEquals("lastCommitted must be 2 less than current - we never finished the commit", + translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration()); assertFalse(translog.syncNeeded()); try (Translog.Snapshot snapshot = new SortedSnapshot(translog.newSnapshot())) { int upTo = sync ? translogOperations : prepareOp; @@ -1566,7 +1610,8 @@ public void testSnapshotFromStreamInput() throws IOException { List ops = new ArrayList<>(); int translogOperations = randomIntBetween(10, 100); for (int op = 0; op < translogOperations; op++) { - Translog.Index test = new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))); + Translog.Index test = new Translog.Index("test", "" + op, op, primaryTerm.get(), + Integer.toString(op).getBytes(Charset.forName("UTF-8"))); ops.add(test); } Translog.writeOperations(out, ops); @@ -1693,7 +1738,8 @@ public void testRandomExceptionsOnTrimOperations( ) throws Exception { TranslogConfig config = getTranslogConfig(tempDir); List fileChannels = new ArrayList<>(); final Translog failableTLog = - getFailableTranslog(fail, config, randomBoolean(), false, null, createTranslogDeletionPolicy(), fileChannels); + getFailableTranslog(fail, config, randomBoolean(), + false, null, createTranslogDeletionPolicy(), fileChannels); IOException expectedException = null; int translogOperations = 0; @@ -1767,8 +1813,10 @@ public void testLocationHashCodeEquals() throws IOException { int translogOperations = randomIntBetween(10, 100); try (Translog translog2 = create(createTempDir())) { for (int op = 0; op < translogOperations; op++) { - locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); - locations2.add(translog2.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); + locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), + Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); + locations2.add(translog2.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), + Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); } int iters = randomIntBetween(10, 100); for (int i = 0; i < iters; i++) { @@ -1794,7 +1842,8 @@ public void 
testOpenForeignTranslog() throws IOException { int translogOperations = randomIntBetween(1, 10); int firstUncommitted = 0; for (int op = 0; op < translogOperations; op++) { - locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); + locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), + Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); if (randomBoolean()) { rollAndCommit(translog); firstUncommitted = op + 1; @@ -1826,10 +1875,12 @@ public void testOpenForeignTranslog() throws IOException { } public void testFailOnClosedWrite() throws IOException { - translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), Integer.toString(1).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), + Integer.toString(1).getBytes(Charset.forName("UTF-8")))); translog.close(); try { - translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), Integer.toString(1).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), + Integer.toString(1).getBytes(Charset.forName("UTF-8")))); fail("closed"); } catch (AlreadyClosedException ex) { // all is well @@ -1849,7 +1900,8 @@ public void testCloseConcurrently() throws Throwable { final AtomicLong seqNoGenerator = new AtomicLong(); for (int i = 0; i < threadCount; i++) { final int threadId = i; - threads[i] = new TranslogThread(translog, downLatch, opsPerThread, threadId, writtenOperations, seqNoGenerator, threadExceptions); + threads[i] = new TranslogThread(translog, downLatch, opsPerThread, threadId, + writtenOperations, seqNoGenerator, threadExceptions); threads[i].setDaemon(true); threads[i].start(); } @@ -1948,7 +2000,8 @@ public void testFailFlush() throws IOException { while (failed == false) { try { locations.add(translog.add( - new Translog.Index("test", "" + opsSynced, opsSynced, primaryTerm.get(), Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8"))))); + new Translog.Index("test", "" + opsSynced, opsSynced, primaryTerm.get(), + Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8"))))); translog.sync(); opsSynced++; } catch (MockDirectoryWrapper.FakeIOException ex) { @@ -1969,7 +2022,8 @@ public void testFailFlush() throws IOException { if (randomBoolean()) { try { locations.add(translog.add( - new Translog.Index("test", "" + opsSynced, opsSynced, primaryTerm.get(), Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8"))))); + new Translog.Index("test", "" + opsSynced, opsSynced, primaryTerm.get(), + Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8"))))); fail("we are already closed"); } catch (AlreadyClosedException ex) { assertNotNull(ex.getCause()); @@ -2003,14 +2057,17 @@ public void testFailFlush() throws IOException { translog.close(); // we are closed final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); - try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { - assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, tlog.currentFileGeneration()); + try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { + assertEquals("lastCommitted must be 1 less than current", + 
translogGeneration.translogFileGeneration + 1, tlog.currentFileGeneration()); assertFalse(tlog.syncNeeded()); try (Translog.Snapshot snapshot = tlog.newSnapshot()) { assertEquals(opsSynced, snapshot.totalOperations()); for (int i = 0; i < opsSynced; i++) { - assertEquals("expected operation" + i + " to be in the previous translog but wasn't", tlog.currentFileGeneration() - 1, + assertEquals("expected operation" + i + " to be in the previous translog but wasn't", + tlog.currentFileGeneration() - 1, locations.get(i).generation); Translog.Operation next = snapshot.next(); assertNotNull("operation " + i + " must be non-null", next); @@ -2026,11 +2083,13 @@ public void testTranslogOpsCountIsCorrect() throws IOException { LineFileDocs lineFileDocs = new LineFileDocs(random()); // writes pretty big docs so we cross buffer borders regularly for (int opsAdded = 0; opsAdded < numOps; opsAdded++) { locations.add(translog.add( - new Translog.Index("test", "" + opsAdded, opsAdded, primaryTerm.get(), lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8"))))); + new Translog.Index("test", "" + opsAdded, opsAdded, primaryTerm.get(), + lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8"))))); try (Translog.Snapshot snapshot = this.translog.newSnapshot()) { assertEquals(opsAdded + 1, snapshot.totalOperations()); for (int i = 0; i < opsAdded; i++) { - assertEquals("expected operation" + i + " to be in the current translog but wasn't", translog.currentFileGeneration(), + assertEquals("expected operation" + i + " to be in the current translog but wasn't", + translog.currentFileGeneration(), locations.get(i).generation); Translog.Operation next = snapshot.next(); assertNotNull("operation " + i + " must be non-null", next); @@ -2043,13 +2102,16 @@ public void testTragicEventCanBeAnyException() throws IOException { Path tempDir = createTempDir(); final FailSwitch fail = new FailSwitch(); TranslogConfig config = getTranslogConfig(tempDir); - Translog translog = getFailableTranslog(fail, config, false, true, null, createTranslogDeletionPolicy()); + Translog translog = getFailableTranslog(fail, config, false, true, null, + createTranslogDeletionPolicy()); LineFileDocs lineFileDocs = new LineFileDocs(random()); // writes pretty big docs so we cross buffer boarders regularly - translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), + lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8")))); fail.failAlways(); try { Translog.Location location = translog.add( - new Translog.Index("test", "2", 1, primaryTerm.get(), lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8")))); + new Translog.Index("test", "2", 1, primaryTerm.get(), + lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8")))); if (randomBoolean()) { translog.ensureSynced(location); } else { @@ -2083,7 +2145,8 @@ public void testFatalIOExceptionsWhileWritingConcurrently() throws IOException, List writtenOperations = Collections.synchronizedList(new ArrayList<>()); for (int i = 0; i < threadCount; i++) { final int threadId = i; - threads[i] = new TranslogThread(translog, downLatch, 200, threadId, writtenOperations, seqNoGenerator, threadExceptions) { + threads[i] = new TranslogThread(translog, downLatch, 200, threadId, + writtenOperations, seqNoGenerator, threadExceptions) { @Override protected Translog.Location add(Translog.Operation op) throws 
IOException { Translog.Location add = super.add(op); @@ -2139,7 +2202,8 @@ protected void afterAdd() throws IOException { } } try (Translog tlog = - new Translog(config, translogUUID, createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); + new Translog(config, translogUUID, createTranslogDeletionPolicy(), + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); Translog.Snapshot snapshot = tlog.newSnapshot()) { if (writtenOperations.size() != snapshot.totalOperations()) { for (int i = 0; i < threadCount; i++) { @@ -2150,7 +2214,8 @@ protected void afterAdd() throws IOException { } assertEquals(writtenOperations.size(), snapshot.totalOperations()); for (int i = 0; i < writtenOperations.size(); i++) { - assertEquals("expected operation" + i + " to be in the previous translog but wasn't", tlog.currentFileGeneration() - 1, writtenOperations.get(i).location.generation); + assertEquals("expected operation" + i + " to be in the previous translog but wasn't", + tlog.currentFileGeneration() - 1, writtenOperations.get(i).location.generation); Translog.Operation next = snapshot.next(); assertNotNull("operation " + i + " must be non-null", next); assertEquals(next, writtenOperations.get(i).operation); @@ -2166,7 +2231,8 @@ protected void afterAdd() throws IOException { public void testRecoveryFromAFutureGenerationCleansUp() throws IOException { int translogOperations = randomIntBetween(10, 100); for (int op = 0; op < translogOperations / 2; op++) { - translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), + Integer.toString(op).getBytes(Charset.forName("UTF-8")))); if (rarely()) { translog.rollGeneration(); } @@ -2174,7 +2240,8 @@ public void testRecoveryFromAFutureGenerationCleansUp() throws IOException { translog.rollGeneration(); long comittedGeneration = randomLongBetween(2, translog.currentFileGeneration()); for (int op = translogOperations / 2; op < translogOperations; op++) { - translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), + Integer.toString(op).getBytes(Charset.forName("UTF-8")))); if (rarely()) { translog.rollGeneration(); } @@ -2185,7 +2252,8 @@ public void testRecoveryFromAFutureGenerationCleansUp() throws IOException { final TranslogDeletionPolicy deletionPolicy = new TranslogDeletionPolicy(-1, -1); deletionPolicy.setTranslogGenerationOfLastCommit(randomLongBetween(comittedGeneration, Long.MAX_VALUE)); deletionPolicy.setMinTranslogGenerationForRecovery(comittedGeneration); - translog = new Translog(config, translog.getTranslogUUID(), deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); + translog = new Translog(config, translog.getTranslogUUID(), deletionPolicy, + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); assertThat(translog.getMinFileGeneration(), equalTo(1L)); // no trimming done yet, just recovered for (long gen = 1; gen < translog.currentFileGeneration(); gen++) { @@ -2216,7 +2284,8 @@ public void testRecoveryFromFailureOnTrimming() throws IOException { translogUUID = translog.getTranslogUUID(); int translogOperations = randomIntBetween(10, 100); for (int op = 0; op < translogOperations / 2; op++) { - translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), 
Integer.toString(op).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), + Integer.toString(op).getBytes(Charset.forName("UTF-8")))); if (rarely()) { translog.rollGeneration(); } @@ -2224,7 +2293,8 @@ public void testRecoveryFromFailureOnTrimming() throws IOException { translog.rollGeneration(); comittedGeneration = randomLongBetween(2, translog.currentFileGeneration()); for (int op = translogOperations / 2; op < translogOperations; op++) { - translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), + Integer.toString(op).getBytes(Charset.forName("UTF-8")))); if (rarely()) { translog.rollGeneration(); } @@ -2241,7 +2311,8 @@ public void testRecoveryFromFailureOnTrimming() throws IOException { final TranslogDeletionPolicy deletionPolicy = new TranslogDeletionPolicy(-1, -1); deletionPolicy.setTranslogGenerationOfLastCommit(randomLongBetween(comittedGeneration, Long.MAX_VALUE)); deletionPolicy.setMinTranslogGenerationForRecovery(comittedGeneration); - try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { + try (Translog translog = new Translog(config, translogUUID, deletionPolicy, + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { // we don't know when things broke exactly assertThat(translog.getMinFileGeneration(), greaterThanOrEqualTo(1L)); assertThat(translog.getMinFileGeneration(), lessThanOrEqualTo(comittedGeneration)); @@ -2307,8 +2378,10 @@ private Translog getFailableTranslog(final FailSwitch fail, final TranslogConfig } boolean success = false; try { - final boolean isCkpFile = file.getFileName().toString().endsWith(".ckp"); // don't do partial writes for checkpoints we rely on the fact that the bytes are written as an atomic operation - ThrowingFileChannel throwingFileChannel = new ThrowingFileChannel(fail, isCkpFile ? false : partialWrites, throwUnknownException, channel); + // don't do partial writes for checkpoints we rely on the fact that the bytes are written as an atomic operation + final boolean isCkpFile = file.getFileName().toString().endsWith(".ckp"); + ThrowingFileChannel throwingFileChannel = + new ThrowingFileChannel(fail, isCkpFile ? 
false : partialWrites, throwUnknownException, channel); success = true; return throwingFileChannel; } finally { @@ -2344,7 +2417,8 @@ public static class ThrowingFileChannel extends FilterFileChannel { private final boolean partialWrite; private final boolean throwUnknownException; - public ThrowingFileChannel(FailSwitch fail, boolean partialWrite, boolean throwUnknownException, FileChannel delegate) throws MockDirectoryWrapper.FakeIOException { + public ThrowingFileChannel(FailSwitch fail, boolean partialWrite, + boolean throwUnknownException, FileChannel delegate) throws MockDirectoryWrapper.FakeIOException { super(delegate); this.fail = fail; this.partialWrite = partialWrite; @@ -2432,7 +2506,8 @@ public void testFailWhileCreateWriteWithRecoveredTLogs() throws IOException { translog.add(new Translog.Index("test", "boom", 0, primaryTerm.get(), "boom".getBytes(Charset.forName("UTF-8")))); translog.close(); try { - new Translog(config, translog.getTranslogUUID(), createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get) { + new Translog(config, translog.getTranslogUUID(), createTranslogDeletionPolicy(), + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get) { @Override protected TranslogWriter createWriter(long fileGeneration, long initialMinTranslogGen, long initialGlobalCheckpoint) throws IOException { @@ -2447,7 +2522,8 @@ protected TranslogWriter createWriter(long fileGeneration, long initialMinTransl } public void testRecoverWithUnbackedNextGen() throws IOException { - translog.add(new Translog.Index("test", "" + 0, 0, primaryTerm.get(), Integer.toString(1).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + 0, 0, primaryTerm.get(), + Integer.toString(1).getBytes(Charset.forName("UTF-8")))); translog.close(); TranslogConfig config = translog.getConfig(); @@ -2463,7 +2539,8 @@ public void testRecoverWithUnbackedNextGen() throws IOException { assertNotNull("operation 1 must be non-null", op); assertEquals("payload mismatch for operation 1", 1, Integer.parseInt(op.getSource().source.utf8ToString())); - tlog.add(new Translog.Index("test", "" + 1, 1, primaryTerm.get(), Integer.toString(2).getBytes(Charset.forName("UTF-8")))); + tlog.add(new Translog.Index("test", "" + 1, 1, primaryTerm.get(), + Integer.toString(2).getBytes(Charset.forName("UTF-8")))); } try (Translog tlog = openTranslog(config, translog.getTranslogUUID()); @@ -2472,16 +2549,19 @@ public void testRecoverWithUnbackedNextGen() throws IOException { Translog.Operation secondOp = snapshot.next(); assertNotNull("operation 2 must be non-null", secondOp); - assertEquals("payload mismatch for operation 2", Integer.parseInt(secondOp.getSource().source.utf8ToString()), 2); + assertEquals("payload mismatch for operation 2", + Integer.parseInt(secondOp.getSource().source.utf8ToString()), 2); Translog.Operation firstOp = snapshot.next(); assertNotNull("operation 1 must be non-null", firstOp); - assertEquals("payload mismatch for operation 1", Integer.parseInt(firstOp.getSource().source.utf8ToString()), 1); + assertEquals("payload mismatch for operation 1", + Integer.parseInt(firstOp.getSource().source.utf8ToString()), 1); } } public void testRecoverWithUnbackedNextGenInIllegalState() throws IOException { - translog.add(new Translog.Index("test", "" + 0, 0, primaryTerm.get(), Integer.toString(0).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + 0, 0, primaryTerm.get(), + Integer.toString(0).getBytes(Charset.forName("UTF-8")))); 
translog.close(); TranslogConfig config = translog.getConfig(); Path ckp = config.getTranslogPath().resolve(Translog.CHECKPOINT_FILE_NAME); @@ -2490,7 +2570,8 @@ public void testRecoverWithUnbackedNextGenInIllegalState() throws IOException { Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 1) + ".tlog")); try { - Translog tlog = new Translog(config, translog.getTranslogUUID(), translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); + Translog tlog = new Translog(config, translog.getTranslogUUID(), translog.getDeletionPolicy(), + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); fail("file already exists?"); } catch (TranslogException ex) { // all is well @@ -2500,7 +2581,8 @@ public void testRecoverWithUnbackedNextGenInIllegalState() throws IOException { } public void testRecoverWithUnbackedNextGenAndFutureFile() throws IOException { - translog.add(new Translog.Index("test", "" + 0, 0, primaryTerm.get(), Integer.toString(0).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + 0, 0, primaryTerm.get(), + Integer.toString(0).getBytes(Charset.forName("UTF-8")))); translog.close(); TranslogConfig config = translog.getConfig(); final String translogUUID = translog.getTranslogUUID(); @@ -2512,7 +2594,8 @@ public void testRecoverWithUnbackedNextGenAndFutureFile() throws IOException { Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 1) + ".tlog")); // we add N+1 and N+2 to ensure we only delete the N+1 file and never jump ahead and wipe without the right condition Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 2) + ".tlog")); - try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { + try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { assertFalse(tlog.syncNeeded()); try (Translog.Snapshot snapshot = tlog.newSnapshot()) { for (int i = 0; i < 1; i++) { @@ -2521,7 +2604,8 @@ public void testRecoverWithUnbackedNextGenAndFutureFile() throws IOException { assertEquals("payload missmatch", i, Integer.parseInt(next.getSource().source.utf8ToString())); } } - tlog.add(new Translog.Index("test", "" + 1, 1, primaryTerm.get(), Integer.toString(1).getBytes(Charset.forName("UTF-8")))); + tlog.add(new Translog.Index("test", "" + 1, 1, primaryTerm.get(), + Integer.toString(1).getBytes(Charset.forName("UTF-8")))); } try { @@ -2556,12 +2640,14 @@ public void testWithRandomException() throws IOException { String generationUUID = null; try { boolean committing = false; - final Translog failableTLog = getFailableTranslog(fail, config, randomBoolean(), false, generationUUID, createTranslogDeletionPolicy()); + final Translog failableTLog = getFailableTranslog(fail, config, randomBoolean(), false, + generationUUID, createTranslogDeletionPolicy()); try { LineFileDocs lineFileDocs = new LineFileDocs(random()); //writes pretty big docs so we cross buffer boarders regularly for (int opsAdded = 0; opsAdded < numOps; opsAdded++) { String doc = lineFileDocs.nextDoc().toString(); - failableTLog.add(new Translog.Index("test", "" + opsAdded, opsAdded, primaryTerm.get(), doc.getBytes(Charset.forName("UTF-8")))); + failableTLog.add(new Translog.Index("test", "" + opsAdded, opsAdded, primaryTerm.get(), + doc.getBytes(Charset.forName("UTF-8")))); unsynced.add(doc); if (randomBoolean()) { 
failableTLog.sync(); @@ -2569,7 +2655,8 @@ public void testWithRandomException() throws IOException { unsynced.clear(); } if (randomFloat() < 0.1) { - failableTLog.sync(); // we have to sync here first otherwise we don't know if the sync succeeded if the commit fails + // we have to sync here first otherwise we don't know if the sync succeeded if the commit fails + failableTLog.sync(); syncedDocs.addAll(unsynced); unsynced.clear(); failableTLog.rollGeneration(); @@ -2635,9 +2722,11 @@ public void testWithRandomException() throws IOException { deletionPolicy.setMinTranslogGenerationForRecovery(minGenForRecovery); if (generationUUID == null) { // we never managed to successfully create a translog, make it - generationUUID = Translog.createEmptyTranslog(config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); + generationUUID = Translog.createEmptyTranslog(config.getTranslogPath(), + SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); } - try (Translog translog = new Translog(config, generationUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); + try (Translog translog = new Translog(config, generationUUID, deletionPolicy, + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); Translog.Snapshot snapshot = translog.newSnapshotFromGen( new Translog.TranslogGeneration(generationUUID, minGenForRecovery), Long.MAX_VALUE)) { assertEquals(syncedDocs.size(), snapshot.totalOperations()); @@ -2670,7 +2759,8 @@ private Checkpoint randomCheckpoint() { public void testCheckpointOnDiskFull() throws IOException { final Checkpoint checkpoint = randomCheckpoint(); Path tempDir = createTempDir(); - Checkpoint.write(FileChannel::open, tempDir.resolve("foo.cpk"), checkpoint, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW); + Checkpoint.write(FileChannel::open, tempDir.resolve("foo.cpk"), checkpoint, + StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW); final Checkpoint checkpoint2 = randomCheckpoint(); try { Checkpoint.write((p, o) -> { @@ -2720,7 +2810,8 @@ public static Translog.Location randomTranslogLocation() { public void testTranslogOpSerialization() throws Exception { BytesReference B_1 = new BytesArray(new byte[]{1}); SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID(); - assert Version.CURRENT.major <= 6 : "Using UNASSIGNED_SEQ_NO can be removed in 7.0, because 6.0+ nodes have actual sequence numbers"; + assert Version.CURRENT.major <= 6 : + "Using UNASSIGNED_SEQ_NO can be removed in 7.0, because 6.0+ nodes have actual sequence numbers"; long randomSeqNum = randomBoolean() ? SequenceNumbers.UNASSIGNED_SEQ_NO : randomNonNegativeLong(); long primaryTerm = randomSeqNum == SequenceNumbers.UNASSIGNED_SEQ_NO ? 0 : randomIntBetween(1, 16); long randomPrimaryTerm = randomBoolean() ? 
0 : randomNonNegativeLong(); @@ -2736,7 +2827,8 @@ public void testTranslogOpSerialization() throws Exception { document.add(seqID.seqNo); document.add(seqID.seqNoDocValue); document.add(seqID.primaryTerm); - ParsedDocument doc = new ParsedDocument(versionField, seqID, "1", "type", null, Arrays.asList(document), B_1, XContentType.JSON, + ParsedDocument doc = new ParsedDocument(versionField, seqID, "1", "type", null, + Arrays.asList(document), B_1, XContentType.JSON, null); Engine.Index eIndex = new Engine.Index(newUid(doc), doc, randomSeqNum, randomPrimaryTerm, @@ -2980,7 +3072,8 @@ public void testSnapshotReadOperationInReverse() throws Exception { for (int gen = 0; gen < generations; gen++) { final int operations = randomIntBetween(1, 100); for (int i = 0; i < operations; i++) { - Translog.Index op = new Translog.Index("doc", randomAlphaOfLength(10), seqNo.getAndIncrement(), primaryTerm.get(), new byte[]{1}); + Translog.Index op = new Translog.Index("doc", randomAlphaOfLength(10), + seqNo.getAndIncrement(), primaryTerm.get(), new byte[]{1}); translog.add(op); views.peek().add(op); } @@ -3005,7 +3098,8 @@ public void testSnapshotDedupOperations() throws Exception { List batch = LongStream.rangeClosed(0, between(0, 500)).boxed().collect(Collectors.toList()); Randomness.shuffle(batch); for (Long seqNo : batch) { - Translog.Index op = new Translog.Index("doc", randomAlphaOfLength(10), seqNo, primaryTerm.get(), new byte[]{1}); + Translog.Index op = new Translog.Index("doc", randomAlphaOfLength(10), + seqNo, primaryTerm.get(), new byte[]{1}); translog.add(op); latestOperations.put(op.seqNo(), op); } @@ -3038,7 +3132,8 @@ public void testCloseSnapshotTwice() throws Exception { public void testTranslogCloseInvariant() throws IOException { assumeTrue("test only works with assertions enabled", Assertions.ENABLED); class MisbehavingTranslog extends Translog { - MisbehavingTranslog(TranslogConfig config, String translogUUID, TranslogDeletionPolicy deletionPolicy, LongSupplier globalCheckpointSupplier, LongSupplier primaryTermSupplier) throws IOException { + MisbehavingTranslog(TranslogConfig config, String translogUUID, TranslogDeletionPolicy deletionPolicy, + LongSupplier globalCheckpointSupplier, LongSupplier primaryTermSupplier) throws IOException { super(config, translogUUID, deletionPolicy, globalCheckpointSupplier, primaryTermSupplier); } @@ -3067,7 +3162,8 @@ void callCloseOnTragicEvent() { final TranslogConfig translogConfig = getTranslogConfig(path); final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(translogConfig.getIndexSettings()); final String translogUUID = Translog.createEmptyTranslog(path, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); - MisbehavingTranslog misbehavingTranslog = new MisbehavingTranslog(translogConfig, translogUUID, deletionPolicy, () -> globalCheckpoint.get(), primaryTerm::get); + MisbehavingTranslog misbehavingTranslog = new MisbehavingTranslog(translogConfig, translogUUID, deletionPolicy, + () -> globalCheckpoint.get(), primaryTerm::get); expectThrows(AssertionError.class, () -> misbehavingTranslog.callCloseDirectly()); expectThrows(AssertionError.class, () -> misbehavingTranslog.callCloseUsingIOUtils()); diff --git a/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java b/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java index 377dd9e6512e4..87b8e3d2df6bb 100644 --- a/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java +++ 
b/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java @@ -19,6 +19,8 @@ package org.elasticsearch.persistent; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionListener; @@ -293,6 +295,8 @@ public int hashCode() { public static class TestPersistentTasksExecutor extends PersistentTasksExecutor { + private static final Logger logger = LogManager.getLogger(TestPersistentTasksExecutor.class); + public static final String NAME = "cluster:admin/persistent/test"; private final ClusterService clusterService; diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java index 6872b627ce27b..3b5eaa12a691b 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java @@ -38,1226 +38,660 @@ import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorTestCase; import org.elasticsearch.search.aggregations.metrics.stats.Stats; -import org.hamcrest.Matchers; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; -import org.joda.time.chrono.ISOChronology; -import org.joda.time.format.DateTimeFormat; -import org.joda.time.format.DateTimeFormatter; +import org.junit.Assert; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.function.Consumer; +import java.util.stream.Collectors; public class AutoDateHistogramAggregatorTests extends AggregatorTestCase { - private static final String DATE_FIELD = "date"; private static final String INSTANT_FIELD = "instant"; - private static final List dataset = Arrays.asList( - "2010-03-12T01:07:45", - "2010-04-27T03:43:34", - "2012-05-18T04:11:00", - "2013-05-29T05:11:31", - "2013-10-31T08:24:05", - "2015-02-13T13:09:32", - "2015-06-24T13:47:43", - "2015-11-13T16:14:34", - "2016-03-04T17:09:50", - "2017-12-12T22:55:46"); + private static final List DATES_WITH_TIME = Arrays.asList( + new DateTime(2010, 3, 12, 1, 7, 45, DateTimeZone.UTC), + new DateTime(2010, 4, 27, 3, 43, 34, DateTimeZone.UTC), + new DateTime(2012, 5, 18, 4, 11, 0, DateTimeZone.UTC), + new DateTime(2013, 5, 29, 5, 11, 31, DateTimeZone.UTC), + new DateTime(2013, 10, 31, 8, 24, 5, DateTimeZone.UTC), + new DateTime(2015, 2, 13, 13, 9, 32, DateTimeZone.UTC), + new DateTime(2015, 6, 24, 13, 47, 43, DateTimeZone.UTC), + new DateTime(2015, 11, 13, 16, 14, 34, DateTimeZone.UTC), + new DateTime(2016, 3, 4, 17, 9, 50, DateTimeZone.UTC), + new DateTime(2017, 12, 12, 22, 55, 46, DateTimeZone.UTC)); + + private static final Query DEFAULT_QUERY = new MatchAllDocsQuery(); public void testMatchNoDocs() throws IOException { - testBothCases(new MatchNoDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(10).field(DATE_FIELD), - histogram -> assertEquals(0, histogram.getBuckets().size()) + testBothCases(new MatchNoDocsQuery(), DATES_WITH_TIME, + aggregation -> aggregation.setNumBuckets(10).field(DATE_FIELD), + histogram -> assertEquals(0, 
histogram.getBuckets().size()) ); } public void testMatchAllDocs() throws IOException { - Query query = new MatchAllDocsQuery(); - - testSearchCase(query, dataset, - aggregation -> aggregation.setNumBuckets(6).field(DATE_FIELD), - histogram -> assertEquals(10, histogram.getBuckets().size()) + testSearchCase(DEFAULT_QUERY, DATES_WITH_TIME, + aggregation -> aggregation.setNumBuckets(6).field(DATE_FIELD), + histogram -> assertEquals(10, histogram.getBuckets().size()) ); - testSearchAndReduceCase(query, dataset, - aggregation -> aggregation.setNumBuckets(8).field(DATE_FIELD), - histogram -> assertEquals(8, histogram.getBuckets().size()) + testSearchAndReduceCase(DEFAULT_QUERY, DATES_WITH_TIME, + aggregation -> aggregation.setNumBuckets(8).field(DATE_FIELD), + histogram -> assertEquals(8, histogram.getBuckets().size()) ); } public void testSubAggregations() throws IOException { - Query query = new MatchAllDocsQuery(); - testSearchAndReduceCase(query, dataset, - aggregation -> aggregation.setNumBuckets(8).field(DATE_FIELD) - .subAggregation(AggregationBuilders.stats("stats").field(DATE_FIELD)), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(8, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2010-01-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(2, bucket.getDocCount()); - Stats stats = bucket.getAggregations().get("stats"); - assertEquals("2010-03-12T01:07:45.000Z", stats.getMinAsString()); - assertEquals("2010-04-27T03:43:34.000Z", stats.getMaxAsString()); - assertEquals(2L, stats.getCount()); - - bucket = buckets.get(1); - assertEquals("2011-01-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - stats = bucket.getAggregations().get("stats"); - assertTrue(Double.isInfinite(stats.getMin())); - assertTrue(Double.isInfinite(stats.getMax())); - assertEquals(0L, stats.getCount()); - - bucket = buckets.get(2); - assertEquals("2012-01-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - stats = bucket.getAggregations().get("stats"); - assertEquals("2012-05-18T04:11:00.000Z", stats.getMinAsString()); - assertEquals("2012-05-18T04:11:00.000Z", stats.getMaxAsString()); - assertEquals(1L, stats.getCount()); - - bucket = buckets.get(3); - assertEquals("2013-01-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(2, bucket.getDocCount()); - stats = bucket.getAggregations().get("stats"); - assertEquals("2013-05-29T05:11:31.000Z", stats.getMinAsString()); - assertEquals("2013-10-31T08:24:05.000Z", stats.getMaxAsString()); - assertEquals(2L, stats.getCount()); - - bucket = buckets.get(4); - assertEquals("2014-01-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - stats = bucket.getAggregations().get("stats"); - assertTrue(Double.isInfinite(stats.getMin())); - assertTrue(Double.isInfinite(stats.getMax())); - assertEquals(0L, stats.getCount()); - - bucket = buckets.get(5); - assertEquals("2015-01-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(3, bucket.getDocCount()); - stats = bucket.getAggregations().get("stats"); - assertEquals("2015-02-13T13:09:32.000Z", stats.getMinAsString()); - assertEquals("2015-11-13T16:14:34.000Z", stats.getMaxAsString()); - assertEquals(3L, stats.getCount()); - - bucket = buckets.get(6); - assertEquals("2016-01-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - stats = bucket.getAggregations().get("stats"); - assertEquals("2016-03-04T17:09:50.000Z", 
stats.getMinAsString()); - assertEquals("2016-03-04T17:09:50.000Z", stats.getMaxAsString()); - assertEquals(1L, stats.getCount()); - - bucket = buckets.get(7); - assertEquals("2017-01-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - stats = bucket.getAggregations().get("stats"); - assertEquals("2017-12-12T22:55:46.000Z", stats.getMinAsString()); - assertEquals("2017-12-12T22:55:46.000Z", stats.getMaxAsString()); - assertEquals(1L, stats.getCount()); - }); + testSearchAndReduceCase(DEFAULT_QUERY, DATES_WITH_TIME, + aggregation -> aggregation.setNumBuckets(8).field(DATE_FIELD) + .subAggregation(AggregationBuilders.stats("stats").field(DATE_FIELD)), + histogram -> { + final List buckets = histogram.getBuckets(); + assertEquals(8, buckets.size()); + + Histogram.Bucket bucket = buckets.get(0); + assertEquals("2010-01-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + Stats stats = bucket.getAggregations().get("stats"); + assertEquals("2010-03-12T01:07:45.000Z", stats.getMinAsString()); + assertEquals("2010-04-27T03:43:34.000Z", stats.getMaxAsString()); + assertEquals(2L, stats.getCount()); + + bucket = buckets.get(1); + assertEquals("2011-01-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + stats = bucket.getAggregations().get("stats"); + assertTrue(Double.isInfinite(stats.getMin())); + assertTrue(Double.isInfinite(stats.getMax())); + assertEquals(0L, stats.getCount()); + + bucket = buckets.get(2); + assertEquals("2012-01-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + stats = bucket.getAggregations().get("stats"); + assertEquals("2012-05-18T04:11:00.000Z", stats.getMinAsString()); + assertEquals("2012-05-18T04:11:00.000Z", stats.getMaxAsString()); + assertEquals(1L, stats.getCount()); + + bucket = buckets.get(3); + assertEquals("2013-01-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(2, bucket.getDocCount()); + stats = bucket.getAggregations().get("stats"); + assertEquals("2013-05-29T05:11:31.000Z", stats.getMinAsString()); + assertEquals("2013-10-31T08:24:05.000Z", stats.getMaxAsString()); + assertEquals(2L, stats.getCount()); + + bucket = buckets.get(4); + assertEquals("2014-01-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(0, bucket.getDocCount()); + stats = bucket.getAggregations().get("stats"); + assertTrue(Double.isInfinite(stats.getMin())); + assertTrue(Double.isInfinite(stats.getMax())); + assertEquals(0L, stats.getCount()); + + bucket = buckets.get(5); + assertEquals("2015-01-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(3, bucket.getDocCount()); + stats = bucket.getAggregations().get("stats"); + assertEquals("2015-02-13T13:09:32.000Z", stats.getMinAsString()); + assertEquals("2015-11-13T16:14:34.000Z", stats.getMaxAsString()); + assertEquals(3L, stats.getCount()); + + bucket = buckets.get(6); + assertEquals("2016-01-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + stats = bucket.getAggregations().get("stats"); + assertEquals("2016-03-04T17:09:50.000Z", stats.getMinAsString()); + assertEquals("2016-03-04T17:09:50.000Z", stats.getMaxAsString()); + assertEquals(1L, stats.getCount()); + + bucket = buckets.get(7); + assertEquals("2017-01-01T00:00:00.000Z", bucket.getKeyAsString()); + assertEquals(1, bucket.getDocCount()); + stats = bucket.getAggregations().get("stats"); + assertEquals("2017-12-12T22:55:46.000Z", stats.getMinAsString()); + 
assertEquals("2017-12-12T22:55:46.000Z", stats.getMaxAsString()); + assertEquals(1L, stats.getCount()); + }); } public void testNoDocs() throws IOException { - Query query = new MatchNoDocsQuery(); - List dates = Collections.emptyList(); - Consumer aggregation = agg -> agg.setNumBuckets(10).field(DATE_FIELD); + final List dates = Collections.emptyList(); + final Consumer aggregation = agg -> agg.setNumBuckets(10).field(DATE_FIELD); - testSearchCase(query, dates, aggregation, - histogram -> assertEquals(0, histogram.getBuckets().size()) + testSearchCase(DEFAULT_QUERY, dates, aggregation, + histogram -> assertEquals(0, histogram.getBuckets().size()) ); - testSearchAndReduceCase(query, dates, aggregation, - histogram -> assertNull(histogram) + testSearchAndReduceCase(DEFAULT_QUERY, dates, aggregation, + Assert::assertNull ); } public void testAggregateWrongField() throws IOException { - testBothCases(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(10).field("wrong_field"), - histogram -> assertEquals(0, histogram.getBuckets().size()) + testBothCases(DEFAULT_QUERY, DATES_WITH_TIME, + aggregation -> aggregation.setNumBuckets(10).field("wrong_field"), + histogram -> assertEquals(0, histogram.getBuckets().size()) ); } public void testIntervalYear() throws IOException { - testSearchCase(LongPoint.newRangeQuery(INSTANT_FIELD, asLong("2015-01-01"), asLong("2017-12-31")), dataset, - aggregation -> aggregation.setNumBuckets(4).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(5, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2015-02-13T13:09:32.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2015-06-24T13:47:43.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2015-11-13T16:14:34.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(3); - assertEquals("2016-03-04T17:09:50.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(4); - assertEquals("2017-12-12T22:55:46.000Z", bucket.getKeyAsString()); + final long start = new DateTime(DateTimeZone.UTC).withDate(2015, 1, 1).getMillis(); + final long end = new DateTime(DateTimeZone.UTC).withDate(2017, 12, 31).getMillis(); + final Query rangeQuery = LongPoint.newRangeQuery(INSTANT_FIELD, start, end); + testSearchCase(rangeQuery, DATES_WITH_TIME, + aggregation -> aggregation.setNumBuckets(4).field(DATE_FIELD), + histogram -> { + final List buckets = histogram.getBuckets(); + assertEquals(5, buckets.size()); + for (int i = 0; i < buckets.size(); i++) { + final Histogram.Bucket bucket = buckets.get(i); + assertEquals(DATES_WITH_TIME.get(5 + i), bucket.getKey()); assertEquals(1, bucket.getDocCount()); } + } ); - testSearchAndReduceCase(LongPoint.newRangeQuery(INSTANT_FIELD, asLong("2015-01-01"), asLong("2017-12-31")), dataset, - aggregation -> aggregation.setNumBuckets(4).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(3, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2015-01-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(3, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2016-01-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(2); - 
assertEquals("2017-01-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - } + testSearchAndReduceCase(rangeQuery, DATES_WITH_TIME, + aggregation -> aggregation.setNumBuckets(4).field(DATE_FIELD), + histogram -> { + final DateTime startDate = new DateTime(2015, 1, 1, 0, 0, DateTimeZone.UTC); + final Map expectedDocCount = new HashMap<>(); + expectedDocCount.put(startDate, 3); + expectedDocCount.put(startDate.plusYears(1), 1); + expectedDocCount.put(startDate.plusYears(2), 1); + final List buckets = histogram.getBuckets(); + assertEquals(expectedDocCount.size(), buckets.size()); + buckets.forEach(bucket -> + assertEquals(expectedDocCount.getOrDefault(bucket.getKey(), 0).longValue(), bucket.getDocCount())); + } ); } public void testIntervalMonth() throws IOException { - testSearchCase(new MatchAllDocsQuery(), - Arrays.asList("2017-01-01", "2017-02-02", "2017-02-03", "2017-03-04", "2017-03-05", "2017-03-06"), - aggregation -> aggregation.setNumBuckets(4).field(DATE_FIELD), histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(6, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2017-01-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2017-02-02T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-02-03T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(3); - assertEquals("2017-03-04T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(4); - assertEquals("2017-03-05T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(5); - assertEquals("2017-03-06T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), - Arrays.asList("2017-01-01", "2017-02-02", "2017-02-03", "2017-03-04", "2017-03-05", "2017-03-06"), - aggregation -> aggregation.setNumBuckets(4).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(3, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2017-01-01T00:00:00.000Z", bucket.getKeyAsString()); + final List datesForMonthInterval = Arrays.asList( + new DateTime(2017, 1, 1, 0, 0, 0, DateTimeZone.UTC), + new DateTime(2017, 2, 2, 0, 0, 0, DateTimeZone.UTC), + new DateTime(2017, 2, 3, 0, 0, 0, DateTimeZone.UTC), + new DateTime(2017, 3, 4, 0, 0, 0, DateTimeZone.UTC), + new DateTime(2017, 3, 5, 0, 0, 0, DateTimeZone.UTC), + new DateTime(2017, 3, 6, 0, 0, 0, DateTimeZone.UTC)); + testSearchCase(DEFAULT_QUERY, datesForMonthInterval, + aggregation -> aggregation.setNumBuckets(4).field(DATE_FIELD), histogram -> { + final List buckets = histogram.getBuckets(); + assertEquals(datesForMonthInterval.size(), buckets.size()); + for (int i = 0; i < buckets.size(); i++) { + final Histogram.Bucket bucket = buckets.get(i); + assertEquals(datesForMonthInterval.get(i), bucket.getKey()); assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2017-02-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(2, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-03-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(3, bucket.getDocCount()); } + }); + testSearchAndReduceCase(DEFAULT_QUERY, 
datesForMonthInterval, + aggregation -> aggregation.setNumBuckets(4).field(DATE_FIELD), + histogram -> { + final Map expectedDocCount = new HashMap<>(); + expectedDocCount.put(datesForMonthInterval.get(0).withDayOfMonth(1), 1); + expectedDocCount.put(datesForMonthInterval.get(1).withDayOfMonth(1), 2); + expectedDocCount.put(datesForMonthInterval.get(3).withDayOfMonth(1), 3); + final List buckets = histogram.getBuckets(); + assertEquals(expectedDocCount.size(), buckets.size()); + buckets.forEach(bucket -> + assertEquals(expectedDocCount.getOrDefault(bucket.getKey(), 0).longValue(), bucket.getDocCount())); + } ); } public void testIntervalDay() throws IOException { - testSearchCase(new MatchAllDocsQuery(), - Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), - aggregation -> aggregation.setNumBuckets(5).field(DATE_FIELD), histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(4, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2017-02-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2017-02-02T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(2, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-02-03T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(3, bucket.getDocCount()); - - bucket = buckets.get(3); - assertEquals("2017-02-05T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), - Arrays.asList( - "2017-02-01", - "2017-02-02", - "2017-02-02", - "2017-02-03", - "2017-02-03", - "2017-02-03", - "2017-02-05" - ), - aggregation -> aggregation.setNumBuckets(5).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(5, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2017-02-01T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2017-02-02T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(2, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-02-03T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(3, bucket.getDocCount()); - - bucket = buckets.get(3); - assertEquals("2017-02-04T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(4); - assertEquals("2017-02-05T00:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - } + final List datesForDayInterval = Arrays.asList( + new DateTime(2017, 2, 1, 0, 0, 0, DateTimeZone.UTC), + new DateTime(2017, 2, 2, 0, 0, 0, DateTimeZone.UTC), + new DateTime(2017, 2, 2, 0, 0, 0, DateTimeZone.UTC), + new DateTime(2017, 2, 3, 0, 0, 0, DateTimeZone.UTC), + new DateTime(2017, 2, 3, 0, 0, 0, DateTimeZone.UTC), + new DateTime(2017, 2, 3, 0, 0, 0, DateTimeZone.UTC), + new DateTime(2017, 2, 5, 0, 0, 0, DateTimeZone.UTC)); + final Map expectedDocCount = new HashMap<>(); + expectedDocCount.put(datesForDayInterval.get(0), 1); + expectedDocCount.put(datesForDayInterval.get(1), 2); + expectedDocCount.put(datesForDayInterval.get(3), 3); + expectedDocCount.put(datesForDayInterval.get(6), 1); + + testSearchCase(DEFAULT_QUERY, datesForDayInterval, + aggregation -> aggregation.setNumBuckets(5).field(DATE_FIELD), histogram -> { + final List buckets = histogram.getBuckets(); + assertEquals(expectedDocCount.size(), 
buckets.size()); + buckets.forEach(bucket -> + assertEquals(expectedDocCount.getOrDefault(bucket.getKey(), 0).longValue(), bucket.getDocCount())); + }); + testSearchAndReduceCase(DEFAULT_QUERY, datesForDayInterval, + aggregation -> aggregation.setNumBuckets(5).field(DATE_FIELD), + histogram -> { + final List buckets = histogram.getBuckets(); + assertEquals(5, buckets.size()); + buckets.forEach(bucket -> + assertEquals(expectedDocCount.getOrDefault(bucket.getKey(), 0).longValue(), bucket.getDocCount())); + } ); } public void testIntervalDayWithTZ() throws IOException { - testSearchCase(new MatchAllDocsQuery(), - Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), - aggregation -> aggregation.setNumBuckets(5).field(DATE_FIELD).timeZone(DateTimeZone.forOffsetHours(-1)), histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(4, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2017-01-31T23:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2017-02-01T23:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(2, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-02-02T23:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(3, bucket.getDocCount()); - - bucket = buckets.get(3); - assertEquals("2017-02-04T23:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), - Arrays.asList("2017-02-01", "2017-02-02", "2017-02-02", "2017-02-03", "2017-02-03", "2017-02-03", "2017-02-05"), - aggregation -> aggregation.setNumBuckets(5).field(DATE_FIELD).timeZone(DateTimeZone.forOffsetHours(-1)), histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(5, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2017-01-31T00:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2017-02-01T00:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(2, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-02-02T00:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(3, bucket.getDocCount()); - - bucket = buckets.get(3); - assertEquals("2017-02-03T00:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(4); - assertEquals("2017-02-04T00:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - }); + final List datesForDayInterval = Arrays.asList( + new DateTime(2017, 2, 1, 0, 0, 0, DateTimeZone.UTC), + new DateTime(2017, 2, 2, 0, 0, 0, DateTimeZone.UTC), + new DateTime(2017, 2, 2, 0, 0, 0, DateTimeZone.UTC), + new DateTime(2017, 2, 3, 0, 0, 0, DateTimeZone.UTC), + new DateTime(2017, 2, 3, 0, 0, 0, DateTimeZone.UTC), + new DateTime(2017, 2, 3, 0, 0, 0, DateTimeZone.UTC), + new DateTime(2017, 2, 5, 0, 0, 0, DateTimeZone.UTC)); + testSearchCase(DEFAULT_QUERY, datesForDayInterval, + aggregation -> aggregation.setNumBuckets(5).field(DATE_FIELD).timeZone(DateTimeZone.forOffsetHours(-1)), histogram -> { + final Map expectedDocCount = new HashMap<>(); + expectedDocCount.put("2017-01-31T23:00:00.000-01:00", 1); + expectedDocCount.put("2017-02-01T23:00:00.000-01:00", 2); + expectedDocCount.put("2017-02-02T23:00:00.000-01:00", 3); + expectedDocCount.put("2017-02-04T23:00:00.000-01:00", 1); + final List 
buckets = histogram.getBuckets(); + assertEquals(expectedDocCount.size(), buckets.size()); + buckets.forEach(bucket -> + assertEquals(expectedDocCount.getOrDefault(bucket.getKeyAsString(), 0).longValue(), bucket.getDocCount())); + }); + testSearchAndReduceCase(DEFAULT_QUERY, datesForDayInterval, + aggregation -> aggregation.setNumBuckets(5).field(DATE_FIELD).timeZone(DateTimeZone.forOffsetHours(-1)), histogram -> { + final Map expectedDocCount = new HashMap<>(); + expectedDocCount.put("2017-01-31T00:00:00.000-01:00", 1); + expectedDocCount.put("2017-02-01T00:00:00.000-01:00", 2); + expectedDocCount.put("2017-02-02T00:00:00.000-01:00", 3); + expectedDocCount.put("2017-02-04T00:00:00.000-01:00", 1); + final List buckets = histogram.getBuckets(); + assertEquals(5, buckets.size()); + buckets.forEach(bucket -> + assertEquals(expectedDocCount.getOrDefault(bucket.getKeyAsString(), 0).longValue(), bucket.getDocCount())); + }); } public void testIntervalHour() throws IOException { - testSearchCase(new MatchAllDocsQuery(), - Arrays.asList( - "2017-02-01T09:02:00.000Z", - "2017-02-01T09:35:00.000Z", - "2017-02-01T10:15:00.000Z", - "2017-02-01T13:06:00.000Z", - "2017-02-01T14:04:00.000Z", - "2017-02-01T14:05:00.000Z", - "2017-02-01T15:59:00.000Z", - "2017-02-01T16:06:00.000Z", - "2017-02-01T16:48:00.000Z", - "2017-02-01T16:59:00.000Z" - ), - aggregation -> aggregation.setNumBuckets(8).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(10, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2017-02-01T09:02:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2017-02-01T09:35:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-02-01T10:15:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(3); - assertEquals("2017-02-01T13:06:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(4); - assertEquals("2017-02-01T14:04:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(5); - assertEquals("2017-02-01T14:05:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(6); - assertEquals("2017-02-01T15:59:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(7); - assertEquals("2017-02-01T16:06:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(8); - assertEquals("2017-02-01T16:48:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(9); - assertEquals("2017-02-01T16:59:00.000Z", bucket.getKeyAsString()); + final List datesForHourInterval = Arrays.asList( + new DateTime(2017, 2, 1, 9, 2, 0, DateTimeZone.UTC), + new DateTime(2017, 2, 1, 9, 35, 0, DateTimeZone.UTC), + new DateTime(2017, 2, 1, 10, 15, 0, DateTimeZone.UTC), + new DateTime(2017, 2, 1, 13, 6, 0, DateTimeZone.UTC), + new DateTime(2017, 2, 1, 14, 4, 0, DateTimeZone.UTC), + new DateTime(2017, 2, 1, 14, 5, 0, DateTimeZone.UTC), + new DateTime(2017, 2, 1, 15, 59, 0, DateTimeZone.UTC), + new DateTime(2017, 2, 1, 16, 6, 0, DateTimeZone.UTC), + new DateTime(2017, 2, 1, 16, 48, 0, DateTimeZone.UTC), + new DateTime(2017, 2, 1, 16, 59, 0, DateTimeZone.UTC)); + testSearchCase(DEFAULT_QUERY, datesForHourInterval, + 
aggregation -> aggregation.setNumBuckets(8).field(DATE_FIELD), + histogram -> { + final List buckets = histogram.getBuckets(); + assertEquals(datesForHourInterval.size(), buckets.size()); + for (int i = 0; i < buckets.size(); i++) { + final Histogram.Bucket bucket = buckets.get(i); + assertEquals(datesForHourInterval.get(i), bucket.getKey()); assertEquals(1, bucket.getDocCount()); } + } ); - testSearchAndReduceCase(new MatchAllDocsQuery(), - Arrays.asList( - "2017-02-01T09:02:00.000Z", - "2017-02-01T09:35:00.000Z", - "2017-02-01T10:15:00.000Z", - "2017-02-01T13:06:00.000Z", - "2017-02-01T14:04:00.000Z", - "2017-02-01T14:05:00.000Z", - "2017-02-01T15:59:00.000Z", - "2017-02-01T16:06:00.000Z", - "2017-02-01T16:48:00.000Z", - "2017-02-01T16:59:00.000Z" - ), - aggregation -> aggregation.setNumBuckets(10).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(8, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2017-02-01T09:00:00.000Z", bucket.getKeyAsString()); - assertEquals(2, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2017-02-01T10:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-02-01T11:00:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(3); - assertEquals("2017-02-01T12:00:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(4); - assertEquals("2017-02-01T13:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(5); - assertEquals("2017-02-01T14:00:00.000Z", bucket.getKeyAsString()); - assertEquals(2, bucket.getDocCount()); - - bucket = buckets.get(6); - assertEquals("2017-02-01T15:00:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(7); - assertEquals("2017-02-01T16:00:00.000Z", bucket.getKeyAsString()); - assertEquals(3, bucket.getDocCount()); - } + testSearchAndReduceCase(DEFAULT_QUERY, datesForHourInterval, + aggregation -> aggregation.setNumBuckets(10).field(DATE_FIELD), + histogram -> { + final Map expectedDocCount = new HashMap<>(); + expectedDocCount.put(datesForHourInterval.get(0).withMinuteOfHour(0), 2); + expectedDocCount.put(datesForHourInterval.get(2).withMinuteOfHour(0), 1); + expectedDocCount.put(datesForHourInterval.get(3).withMinuteOfHour(0), 1); + expectedDocCount.put(datesForHourInterval.get(4).withMinuteOfHour(0), 2); + expectedDocCount.put(datesForHourInterval.get(6).withMinuteOfHour(0), 1); + expectedDocCount.put(datesForHourInterval.get(7).withMinuteOfHour(0), 3); + final List buckets = histogram.getBuckets(); + assertEquals(8, buckets.size()); + buckets.forEach(bucket -> + assertEquals(expectedDocCount.getOrDefault(bucket.getKey(), 0).longValue(), bucket.getDocCount())); + } + ); + testSearchAndReduceCase(DEFAULT_QUERY, datesForHourInterval, + aggregation -> aggregation.setNumBuckets(6).field(DATE_FIELD), + histogram -> { + final Map expectedDocCount = new HashMap<>(); + expectedDocCount.put(datesForHourInterval.get(0).withMinuteOfHour(0), 3); + expectedDocCount.put(datesForHourInterval.get(0).plusHours(3).withMinuteOfHour(0), 3); + expectedDocCount.put(datesForHourInterval.get(0).plusHours(6).withMinuteOfHour(0), 4); + final List buckets = histogram.getBuckets(); + assertEquals(expectedDocCount.size(), buckets.size()); + buckets.forEach(bucket -> + 
assertEquals(expectedDocCount.getOrDefault(bucket.getKey(), 0).longValue(), bucket.getDocCount())); + } ); } public void testIntervalHourWithTZ() throws IOException { - testSearchCase(new MatchAllDocsQuery(), - Arrays.asList( - "2017-02-01T09:02:00.000Z", - "2017-02-01T09:35:00.000Z", - "2017-02-01T10:15:00.000Z", - "2017-02-01T13:06:00.000Z", - "2017-02-01T14:04:00.000Z", - "2017-02-01T14:05:00.000Z", - "2017-02-01T15:59:00.000Z", - "2017-02-01T16:06:00.000Z", - "2017-02-01T16:48:00.000Z", - "2017-02-01T16:59:00.000Z" - ), - aggregation -> aggregation.setNumBuckets(8).field(DATE_FIELD).timeZone(DateTimeZone.forOffsetHours(-1)), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(10, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2017-02-01T08:02:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2017-02-01T08:35:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-02-01T09:15:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(3); - assertEquals("2017-02-01T12:06:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(4); - assertEquals("2017-02-01T13:04:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(5); - assertEquals("2017-02-01T13:05:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(6); - assertEquals("2017-02-01T14:59:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(7); - assertEquals("2017-02-01T15:06:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(8); - assertEquals("2017-02-01T15:48:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(9); - assertEquals("2017-02-01T15:59:00.000-01:00", bucket.getKeyAsString()); + final List datesForHourInterval = Arrays.asList( + new DateTime(2017, 2, 1, 9, 2, 0, DateTimeZone.UTC), + new DateTime(2017, 2, 1, 9, 35, 0, DateTimeZone.UTC), + new DateTime(2017, 2, 1, 10, 15, 0, DateTimeZone.UTC), + new DateTime(2017, 2, 1, 13, 6, 0, DateTimeZone.UTC), + new DateTime(2017, 2, 1, 14, 4, 0, DateTimeZone.UTC), + new DateTime(2017, 2, 1, 14, 5, 0, DateTimeZone.UTC), + new DateTime(2017, 2, 1, 15, 59, 0, DateTimeZone.UTC), + new DateTime(2017, 2, 1, 16, 6, 0, DateTimeZone.UTC), + new DateTime(2017, 2, 1, 16, 48, 0, DateTimeZone.UTC), + new DateTime(2017, 2, 1, 16, 59, 0, DateTimeZone.UTC)); + testSearchCase(DEFAULT_QUERY, datesForHourInterval, + aggregation -> aggregation.setNumBuckets(8).field(DATE_FIELD).timeZone(DateTimeZone.forOffsetHours(-1)), + histogram -> { + final List dateStrings = datesForHourInterval.stream() + .map(dateTime -> dateTime.withZone(DateTimeZone.forOffsetHours(-1)).toString()).collect(Collectors.toList()); + final List buckets = histogram.getBuckets(); + assertEquals(datesForHourInterval.size(), buckets.size()); + for (int i = 0; i < buckets.size(); i++) { + final Histogram.Bucket bucket = buckets.get(i); + assertEquals(dateStrings.get(i), bucket.getKeyAsString()); assertEquals(1, bucket.getDocCount()); } + } ); - testSearchAndReduceCase(new MatchAllDocsQuery(), - Arrays.asList( - "2017-02-01T09:02:00.000Z", - "2017-02-01T09:35:00.000Z", - 
"2017-02-01T10:15:00.000Z", - "2017-02-01T13:06:00.000Z", - "2017-02-01T14:04:00.000Z", - "2017-02-01T14:05:00.000Z", - "2017-02-01T15:59:00.000Z", - "2017-02-01T16:06:00.000Z", - "2017-02-01T16:48:00.000Z", - "2017-02-01T16:59:00.000Z" - ), - aggregation -> aggregation.setNumBuckets(10).field(DATE_FIELD).timeZone(DateTimeZone.forOffsetHours(-1)), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(8, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2017-02-01T08:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(2, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2017-02-01T09:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-02-01T10:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(3); - assertEquals("2017-02-01T11:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(4); - assertEquals("2017-02-01T12:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(5); - assertEquals("2017-02-01T13:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(2, bucket.getDocCount()); - - bucket = buckets.get(6); - assertEquals("2017-02-01T14:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(7); - assertEquals("2017-02-01T15:00:00.000-01:00", bucket.getKeyAsString()); - assertEquals(3, bucket.getDocCount()); - } + testSearchAndReduceCase(DEFAULT_QUERY, datesForHourInterval, + aggregation -> aggregation.setNumBuckets(10).field(DATE_FIELD).timeZone(DateTimeZone.forOffsetHours(-1)), + histogram -> { + final Map expectedDocCount = new HashMap<>(); + expectedDocCount.put("2017-02-01T08:00:00.000-01:00", 2); + expectedDocCount.put("2017-02-01T09:00:00.000-01:00", 1); + expectedDocCount.put("2017-02-01T12:00:00.000-01:00", 1); + expectedDocCount.put("2017-02-01T13:00:00.000-01:00", 2); + expectedDocCount.put("2017-02-01T14:00:00.000-01:00", 1); + expectedDocCount.put("2017-02-01T15:00:00.000-01:00", 3); + final List buckets = histogram.getBuckets(); + assertEquals(8, buckets.size()); + buckets.forEach(bucket -> + assertEquals(expectedDocCount.getOrDefault(bucket.getKeyAsString(), 0).longValue(), bucket.getDocCount())); + } ); } - public void testAllSecondIntervals() throws IOException { - DateTimeFormatter format = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSZ"); - List dataset = new ArrayList<>(); - DateTime startDate = new DateTime(2017, 01, 01, 00, 00, 00, ISOChronology.getInstanceUTC()); - for (int i = 0; i < 600; i++) { - DateTime date = startDate.plusSeconds(i); - dataset.add(format.print(date)); + public void testRandomSecondIntervals() throws IOException { + final int length = 120; + final List dataset = new ArrayList<>(length); + final DateTime startDate = new DateTime(2017, 1, 1, 0, 0, 0, DateTimeZone.UTC); + for (int i = 0; i < length; i++) { + final DateTime date = startDate.plusSeconds(i); + dataset.add(date); } - - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(600).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(600, buckets.size()); - for (int i = 0; i < 600; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusSeconds(i), bucket.getKey()); - assertEquals(1, 
bucket.getDocCount()); - } - }); - - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(300).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(120, buckets.size()); - for (int i = 0; i < 120; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusSeconds(i * 5), bucket.getKey()); - assertEquals(5, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(100).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(60, buckets.size()); - for (int i = 0; i < 60; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusSeconds(i * 10), bucket.getKey()); - assertEquals(10, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(50).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(20, buckets.size()); - for (int i = 0; i < 20; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusSeconds(i * 30), bucket.getKey()); - assertEquals(30, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(15).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(10, buckets.size()); - for (int i = 0; i < 10; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusMinutes(i), bucket.getKey()); - assertEquals(60, bucket.getDocCount()); - } - }); + final Map bucketsToExpectedDocCountMap = new HashMap<>(); + bucketsToExpectedDocCountMap.put(120, 1); + bucketsToExpectedDocCountMap.put(60, 5); + bucketsToExpectedDocCountMap.put(20, 10); + bucketsToExpectedDocCountMap.put(10, 30); + bucketsToExpectedDocCountMap.put(3, 60); + final Map.Entry randomEntry = randomFrom(bucketsToExpectedDocCountMap.entrySet()); + testSearchAndReduceCase(DEFAULT_QUERY, dataset, + aggregation -> aggregation.setNumBuckets(randomEntry.getKey()).field(DATE_FIELD), + histogram -> { + final List buckets = histogram.getBuckets(); + final int expectedDocCount = randomEntry.getValue(); + final int expectedSize = length / expectedDocCount; + assertEquals(expectedSize, buckets.size()); + final int randomIndex = randomInt(expectedSize - 1); + final Histogram.Bucket bucket = buckets.get(randomIndex); + assertEquals(startDate.plusSeconds(randomIndex * expectedDocCount), bucket.getKey()); + assertEquals(expectedDocCount, bucket.getDocCount()); + }); } - public void testAllMinuteIntervals() throws IOException { - DateTimeFormatter format = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSZ"); - List dataset = new ArrayList<>(); - DateTime startDate = new DateTime(2017, 01, 01, 00, 00, 00, ISOChronology.getInstanceUTC()); - for (int i = 0; i < 600; i++) { - DateTime date = startDate.plusMinutes(i); - dataset.add(format.print(date)); + public void testRandomMinuteIntervals() throws IOException { + final int length = 120; + final List dataset = new ArrayList<>(length); + final DateTime startDate = new DateTime(2017, 1, 1, 0, 0, DateTimeZone.UTC); + for (int i = 0; i < length; i++) { + final DateTime date = startDate.plusMinutes(i); + dataset.add(date); } - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(600).field(DATE_FIELD), - histogram -> { - List buckets = 
histogram.getBuckets(); - assertEquals(600, buckets.size()); - for (int i = 0; i < 600; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusMinutes(i), bucket.getKey()); - assertEquals(1, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(300).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(120, buckets.size()); - for (int i = 0; i < 120; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusMinutes(i * 5), bucket.getKey()); - assertEquals(5, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(100).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(60, buckets.size()); - for (int i = 0; i < 60; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusMinutes(i * 10), bucket.getKey()); - assertEquals(10, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(50).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(20, buckets.size()); - for (int i = 0; i < 20; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusMinutes(i * 30), bucket.getKey()); - assertEquals(30, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(15).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(10, buckets.size()); - for (int i = 0; i < 10; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusHours(i), bucket.getKey()); - assertEquals(60, bucket.getDocCount()); - } - }); + final Map bucketsToExpectedDocCountMap = new HashMap<>(); + bucketsToExpectedDocCountMap.put(120, 1); + bucketsToExpectedDocCountMap.put(60, 5); + bucketsToExpectedDocCountMap.put(20, 10); + bucketsToExpectedDocCountMap.put(10, 30); + bucketsToExpectedDocCountMap.put(3, 60); + final Map.Entry randomEntry = randomFrom(bucketsToExpectedDocCountMap.entrySet()); + testSearchAndReduceCase(DEFAULT_QUERY, dataset, + aggregation -> aggregation.setNumBuckets(randomEntry.getKey()).field(DATE_FIELD), + histogram -> { + final List buckets = histogram.getBuckets(); + final int expectedDocCount = randomEntry.getValue(); + final int expectedSize = length / expectedDocCount; + assertEquals(expectedSize, buckets.size()); + final int randomIndex = randomInt(expectedSize - 1); + final Histogram.Bucket bucket = buckets.get(randomIndex); + assertEquals(startDate.plusMinutes(randomIndex * expectedDocCount), bucket.getKey()); + assertEquals(expectedDocCount, bucket.getDocCount()); + }); } - public void testAllHourIntervals() throws IOException { - DateTimeFormatter format = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSZ"); - List dataset = new ArrayList<>(); - DateTime startDate = new DateTime(2017, 01, 01, 00, 00, 00, ISOChronology.getInstanceUTC()); - for (int i = 0; i < 600; i++) { - DateTime date = startDate.plusHours(i); - dataset.add(format.print(date)); + public void testRandomHourIntervals() throws IOException { + final int length = 72; + final List dataset = new ArrayList<>(length); + final DateTime startDate = new DateTime(2017, 1, 1, 0, 0, DateTimeZone.UTC); + for (int i = 0; i < length; i++) { + final DateTime date = 
startDate.plusHours(i); + dataset.add(date); } - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(600).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(600, buckets.size()); - for (int i = 0; i < 600; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusHours(i), bucket.getKey()); - assertEquals(1, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(300).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(200, buckets.size()); - for (int i = 0; i < 200; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusHours(i * 3), bucket.getKey()); - assertEquals(3, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(100).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(50, buckets.size()); - for (int i = 0; i < 50; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusHours(i * 12), bucket.getKey()); - assertEquals(12, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(30).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(25, buckets.size()); - for (int i = 0; i < 25; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusDays(i), bucket.getKey()); - assertEquals(24, bucket.getDocCount()); - } - }); + final Map bucketsToExpectedDocCountMap = new HashMap<>(); + bucketsToExpectedDocCountMap.put(72, 1); + bucketsToExpectedDocCountMap.put(36, 3); + bucketsToExpectedDocCountMap.put(12, 12); + bucketsToExpectedDocCountMap.put(3, 24); + final Map.Entry randomEntry = randomFrom(bucketsToExpectedDocCountMap.entrySet()); + testSearchAndReduceCase(DEFAULT_QUERY, dataset, + aggregation -> aggregation.setNumBuckets(randomEntry.getKey()).field(DATE_FIELD), + histogram -> { + final List buckets = histogram.getBuckets(); + final int expectedDocCount = randomEntry.getValue(); + final int expectedSize = length / expectedDocCount; + assertEquals(expectedSize, buckets.size()); + final int randomIndex = randomInt(expectedSize - 1); + final Histogram.Bucket bucket = buckets.get(randomIndex); + assertEquals(startDate.plusHours(randomIndex * expectedDocCount), bucket.getKey()); + assertEquals(expectedDocCount, bucket.getDocCount()); + }); } - public void testAllDayIntervals() throws IOException { - DateTimeFormatter format = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSZ"); - List dataset = new ArrayList<>(); - DateTime startDate = new DateTime(2017, 01, 01, 00, 00, 00, ISOChronology.getInstanceUTC()); - for (int i = 0; i < 700; i++) { - DateTime date = startDate.plusDays(i); - dataset.add(format.print(date)); + public void testRandomDayIntervals() throws IOException { + final int length = 140; + final List dataset = new ArrayList<>(length); + final DateTime startDate = new DateTime(2017, 1, 1, 0, 0, DateTimeZone.UTC); + for (int i = 0; i < length; i++) { + final DateTime date = startDate.plusDays(i); + dataset.add(date); } - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(700).field(DATE_FIELD), + final int randomChoice = randomIntBetween(1, 3); + if (randomChoice == 1) { + 
testSearchAndReduceCase(DEFAULT_QUERY, dataset, + aggregation -> aggregation.setNumBuckets(length).field(DATE_FIELD), histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(700, buckets.size()); - for (int i = 0; i < 700; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusDays(i), bucket.getKey()); - assertEquals(1, bucket.getDocCount()); - } + final List buckets = histogram.getBuckets(); + assertEquals(length, buckets.size()); + final int randomIndex = randomInt(length - 1); + final Histogram.Bucket bucket = buckets.get(randomIndex); + assertEquals(startDate.plusDays(randomIndex), bucket.getKey()); + assertEquals(1, bucket.getDocCount()); }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(300).field(DATE_FIELD), + } else if (randomChoice == 2) { + testSearchAndReduceCase(DEFAULT_QUERY, dataset, + aggregation -> aggregation.setNumBuckets(60).field(DATE_FIELD), histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(100, buckets.size()); - for (int i = 0; i < 100; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusDays(i * 7), bucket.getKey()); - assertEquals(7, bucket.getDocCount()); - } + final List buckets = histogram.getBuckets(); + final int expectedDocCount = 7; + assertEquals(20, buckets.size()); + final int randomIndex = randomInt(19); + final Histogram.Bucket bucket = buckets.get(randomIndex); + assertEquals(startDate.plusDays(randomIndex * expectedDocCount), bucket.getKey()); + assertEquals(expectedDocCount, bucket.getDocCount()); }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(30).field(DATE_FIELD), + } else if (randomChoice == 3) { + testSearchAndReduceCase(DEFAULT_QUERY, dataset, + aggregation -> aggregation.setNumBuckets(6).field(DATE_FIELD), histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(24, buckets.size()); - for (int i = 0; i < 24; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusMonths(i), bucket.getKey()); - assertThat(bucket.getDocCount(), Matchers.lessThanOrEqualTo(31L)); - } + final List buckets = histogram.getBuckets(); + assertEquals(5, buckets.size()); + final int randomIndex = randomInt(2); + final Histogram.Bucket bucket = buckets.get(randomIndex); + assertEquals(startDate.plusMonths(randomIndex), bucket.getKey()); + assertEquals(startDate.plusMonths(randomIndex).dayOfMonth().getMaximumValue(), bucket.getDocCount()); }); - } - - public void testAllMonthIntervals() throws IOException { - DateTimeFormatter format = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSZ"); - List dataset = new ArrayList<>(); - DateTime startDate = new DateTime(2017, 01, 01, 00, 00, 00, ISOChronology.getInstanceUTC()); - for (int i = 0; i < 600; i++) { - DateTime date = startDate.plusMonths(i); - dataset.add(format.print(date)); } - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(600).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(600, buckets.size()); - for (int i = 0; i < 600; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusMonths(i), bucket.getKey()); - assertEquals(1, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(300).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - 
assertEquals(200, buckets.size()); - for (int i = 0; i < 200; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusMonths(i * 3), bucket.getKey()); - assertEquals(3, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, - aggregation -> aggregation.setNumBuckets(60).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(50, buckets.size()); - for (int i = 0; i < 50; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusYears(i), bucket.getKey()); - assertEquals(12, bucket.getDocCount()); - } - }); } - public void testAllYearIntervals() throws IOException { - DateTimeFormatter format = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSZ"); - List dataset = new ArrayList<>(); - DateTime startDate = new DateTime(2017, 01, 01, 00, 00, 00, ISOChronology.getInstanceUTC()); - for (int i = 0; i < 600; i++) { - DateTime date = startDate.plusYears(i); - dataset.add(format.print(date)); + public void testRandomMonthIntervals() throws IOException { + final int length = 60; + final List dataset = new ArrayList<>(length); + final DateTime startDate = new DateTime(2017, 1, 1, 0, 0, DateTimeZone.UTC); + for (int i = 0; i < length; i++) { + final DateTime date = startDate.plusMonths(i); + dataset.add(date); } - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, aggregation -> aggregation.setNumBuckets(600).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(600, buckets.size()); - for (int i = 0; i < 600; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusYears(i), bucket.getKey()); - assertEquals(1, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, aggregation -> aggregation.setNumBuckets(300).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(120, buckets.size()); - for (int i = 0; i < 120; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusYears(i * 5), bucket.getKey()); - assertEquals(5, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, aggregation -> aggregation.setNumBuckets(100).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(60, buckets.size()); - for (int i = 0; i < 60; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusYears(i * 10), bucket.getKey()); - assertEquals(10, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, aggregation -> aggregation.setNumBuckets(50).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(30, buckets.size()); - for (int i = 0; i < 30; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusYears(i * 20), bucket.getKey()); - assertEquals(20, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, aggregation -> aggregation.setNumBuckets(20).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(12, buckets.size()); - for (int i = 0; i < 12; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusYears(i * 50), bucket.getKey()); - assertEquals(50, bucket.getDocCount()); - } - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), dataset, aggregation -> aggregation.setNumBuckets(10).field(DATE_FIELD), - histogram -> { - List buckets = 
histogram.getBuckets(); - assertEquals(6, buckets.size()); - for (int i = 0; i < 6; i++) { - Histogram.Bucket bucket = buckets.get(i); - assertEquals(startDate.plusYears(i * 100), bucket.getKey()); - assertEquals(100, bucket.getDocCount()); - } - }); + final Map bucketsToExpectedDocCountMap = new HashMap<>(); + bucketsToExpectedDocCountMap.put(60, 1); + bucketsToExpectedDocCountMap.put(30, 3); + bucketsToExpectedDocCountMap.put(6, 12); + final Map.Entry randomEntry = randomFrom(bucketsToExpectedDocCountMap.entrySet()); + testSearchAndReduceCase(DEFAULT_QUERY, dataset, + aggregation -> aggregation.setNumBuckets(randomEntry.getKey()).field(DATE_FIELD), + histogram -> { + final List buckets = histogram.getBuckets(); + final int expectedDocCount = randomEntry.getValue(); + final int expectedSize = length / expectedDocCount; + assertEquals(expectedSize, buckets.size()); + final int randomIndex = randomInt(expectedSize - 1); + final Histogram.Bucket bucket = buckets.get(randomIndex); + assertEquals(startDate.plusMonths(randomIndex * expectedDocCount), bucket.getKey()); + assertEquals(expectedDocCount, bucket.getDocCount()); + }); } - public void testInterval3Hour() throws IOException { - testSearchCase(new MatchAllDocsQuery(), - Arrays.asList( - "2017-02-01T09:02:00.000Z", - "2017-02-01T09:35:00.000Z", - "2017-02-01T10:15:00.000Z", - "2017-02-01T13:06:00.000Z", - "2017-02-01T14:04:00.000Z", - "2017-02-01T14:05:00.000Z", - "2017-02-01T15:59:00.000Z", - "2017-02-01T16:06:00.000Z", - "2017-02-01T16:48:00.000Z", - "2017-02-01T16:59:00.000Z" - ), - aggregation -> aggregation.setNumBuckets(8).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(10, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2017-02-01T09:02:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2017-02-01T09:35:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-02-01T10:15:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(3); - assertEquals("2017-02-01T13:06:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(4); - assertEquals("2017-02-01T14:04:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(5); - assertEquals("2017-02-01T14:05:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(6); - assertEquals("2017-02-01T15:59:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(7); - assertEquals("2017-02-01T16:06:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(8); - assertEquals("2017-02-01T16:48:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(9); - assertEquals("2017-02-01T16:59:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - } - ); - testSearchAndReduceCase(new MatchAllDocsQuery(), - Arrays.asList( - "2017-02-01T09:02:00.000Z", - "2017-02-01T09:35:00.000Z", - "2017-02-01T10:15:00.000Z", - "2017-02-01T13:06:00.000Z", - "2017-02-01T14:04:00.000Z", - "2017-02-01T14:05:00.000Z", - "2017-02-01T15:59:00.000Z", - "2017-02-01T16:06:00.000Z", - "2017-02-01T16:48:00.000Z", - "2017-02-01T16:59:00.000Z" - ), - aggregation -> 
aggregation.setNumBuckets(6).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(3, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2017-02-01T09:00:00.000Z", bucket.getKeyAsString()); - assertEquals(3, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2017-02-01T12:00:00.000Z", bucket.getKeyAsString()); - assertEquals(3, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-02-01T15:00:00.000Z", bucket.getKeyAsString()); - assertEquals(4, bucket.getDocCount()); - } - ); + public void testRandomYearIntervals() throws IOException { + final int length = 300; + final List dataset = new ArrayList<>(length); + final DateTime startDate = new DateTime(2017, 1, 1, 0, 0, DateTimeZone.UTC); + for (int i = 0; i < length; i++) { + final DateTime date = startDate.plusYears(i); + dataset.add(date); + } + final Map bucketsToExpectedDocCountMap = new HashMap<>(); + bucketsToExpectedDocCountMap.put(300, 1); + bucketsToExpectedDocCountMap.put(150, 5); + bucketsToExpectedDocCountMap.put(50, 10); + bucketsToExpectedDocCountMap.put(25, 20); + bucketsToExpectedDocCountMap.put(10, 50); + bucketsToExpectedDocCountMap.put(5, 100); + final Map.Entry randomEntry = randomFrom(bucketsToExpectedDocCountMap.entrySet()); + testSearchAndReduceCase(DEFAULT_QUERY, dataset, + aggregation -> aggregation.setNumBuckets(randomEntry.getKey()).field(DATE_FIELD), + histogram -> { + final List buckets = histogram.getBuckets(); + final int expectedDocCount = randomEntry.getValue(); + final int expectedSize = length / expectedDocCount; + assertEquals(expectedSize, buckets.size()); + final int randomIndex = randomInt(expectedSize - 1); + final Histogram.Bucket bucket = buckets.get(randomIndex); + assertEquals(startDate.plusYears(randomIndex * expectedDocCount), bucket.getKey()); + assertEquals(expectedDocCount, bucket.getDocCount()); + }); } public void testIntervalMinute() throws IOException { - testSearchCase(new MatchAllDocsQuery(), - Arrays.asList( - "2017-02-01T09:02:35.000Z", - "2017-02-01T09:02:59.000Z", - "2017-02-01T09:15:37.000Z", - "2017-02-01T09:16:04.000Z", - "2017-02-01T09:16:42.000Z" - ), - aggregation -> aggregation.setNumBuckets(4).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(5, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2017-02-01T09:02:35.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2017-02-01T09:02:59.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-02-01T09:15:37.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(3); - assertEquals("2017-02-01T09:16:04.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(4); - assertEquals("2017-02-01T09:16:42.000Z", bucket.getKeyAsString()); + final List datesForMinuteInterval = Arrays.asList( + new DateTime(2017, 2, 1, 9, 2, 35, DateTimeZone.UTC), + new DateTime(2017, 2, 1, 9, 2, 59, DateTimeZone.UTC), + new DateTime(2017, 2, 1, 9, 15, 37, DateTimeZone.UTC), + new DateTime(2017, 2, 1, 9, 16, 4, DateTimeZone.UTC), + new DateTime(2017, 2, 1, 9, 16, 42, DateTimeZone.UTC)); + + testSearchCase(DEFAULT_QUERY, datesForMinuteInterval, + aggregation -> aggregation.setNumBuckets(4).field(DATE_FIELD), + histogram -> { + final List buckets = histogram.getBuckets(); 
+ assertEquals(datesForMinuteInterval.size(), buckets.size()); + for (int i = 0; i < buckets.size(); i++) { + final Histogram.Bucket bucket = buckets.get(i); + assertEquals(datesForMinuteInterval.get(i), bucket.getKey()); assertEquals(1, bucket.getDocCount()); } + } ); - testSearchAndReduceCase(new MatchAllDocsQuery(), - Arrays.asList( - "2017-02-01T09:02:35.000Z", - "2017-02-01T09:02:59.000Z", - "2017-02-01T09:15:37.000Z", - "2017-02-01T09:16:04.000Z", - "2017-02-01T09:16:42.000Z" - ), - aggregation -> aggregation.setNumBuckets(15).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(15, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2017-02-01T09:02:00.000Z", bucket.getKeyAsString()); - assertEquals(2, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2017-02-01T09:03:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-02-01T09:04:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(3); - assertEquals("2017-02-01T09:05:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(4); - assertEquals("2017-02-01T09:06:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(5); - assertEquals("2017-02-01T09:07:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(6); - assertEquals("2017-02-01T09:08:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(7); - assertEquals("2017-02-01T09:09:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(8); - assertEquals("2017-02-01T09:10:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(9); - assertEquals("2017-02-01T09:11:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(10); - assertEquals("2017-02-01T09:12:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(11); - assertEquals("2017-02-01T09:13:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(12); - assertEquals("2017-02-01T09:14:00.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(13); - assertEquals("2017-02-01T09:15:00.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(14); - assertEquals("2017-02-01T09:16:00.000Z", bucket.getKeyAsString()); - assertEquals(2, bucket.getDocCount()); - } + testSearchAndReduceCase(DEFAULT_QUERY, datesForMinuteInterval, + aggregation -> aggregation.setNumBuckets(15).field(DATE_FIELD), + histogram -> { + final Map expectedDocCount = new HashMap<>(); + expectedDocCount.put(datesForMinuteInterval.get(0).withSecondOfMinute(0), 2); + expectedDocCount.put(datesForMinuteInterval.get(2).withSecondOfMinute(0), 1); + expectedDocCount.put(datesForMinuteInterval.get(3).withSecondOfMinute(0), 2); + final List buckets = histogram.getBuckets(); + assertEquals(15, buckets.size()); + buckets.forEach(bucket -> + assertEquals(expectedDocCount.getOrDefault(bucket.getKey(), 0).longValue(), bucket.getDocCount())); + } ); } public void testIntervalSecond() throws IOException { - testSearchCase(new MatchAllDocsQuery(), - 
Arrays.asList("2017-02-01T00:00:05.015Z", "2017-02-01T00:00:07.299Z", "2017-02-01T00:00:07.074Z", - "2017-02-01T00:00:11.688Z", "2017-02-01T00:00:11.210Z", "2017-02-01T00:00:11.380Z"), - aggregation -> aggregation.setNumBuckets(7).field(DATE_FIELD), histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(3, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2017-02-01T00:00:05.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2017-02-01T00:00:07.000Z", bucket.getKeyAsString()); - assertEquals(2, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-02-01T00:00:11.000Z", bucket.getKeyAsString()); - assertEquals(3, bucket.getDocCount()); - }); - testSearchAndReduceCase(new MatchAllDocsQuery(), - Arrays.asList( - "2017-02-01T00:00:05.015Z", - "2017-02-01T00:00:07.299Z", - "2017-02-01T00:00:07.074Z", - "2017-02-01T00:00:11.688Z", - "2017-02-01T00:00:11.210Z", - "2017-02-01T00:00:11.380Z" - ), - aggregation -> aggregation.setNumBuckets(7).field(DATE_FIELD), - histogram -> { - List buckets = histogram.getBuckets(); - assertEquals(7, buckets.size()); - - Histogram.Bucket bucket = buckets.get(0); - assertEquals("2017-02-01T00:00:05.000Z", bucket.getKeyAsString()); - assertEquals(1, bucket.getDocCount()); - - bucket = buckets.get(1); - assertEquals("2017-02-01T00:00:06.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(2); - assertEquals("2017-02-01T00:00:07.000Z", bucket.getKeyAsString()); - assertEquals(2, bucket.getDocCount()); - - bucket = buckets.get(3); - assertEquals("2017-02-01T00:00:08.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(4); - assertEquals("2017-02-01T00:00:09.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(5); - assertEquals("2017-02-01T00:00:10.000Z", bucket.getKeyAsString()); - assertEquals(0, bucket.getDocCount()); - - bucket = buckets.get(6); - assertEquals("2017-02-01T00:00:11.000Z", bucket.getKeyAsString()); - assertEquals(3, bucket.getDocCount()); - } + final List datesForSecondInterval = Arrays.asList( + new DateTime(2017, 2, 1, 0, 0, 5, 15, DateTimeZone.UTC), + new DateTime(2017, 2, 1, 0, 0, 7, 299, DateTimeZone.UTC), + new DateTime(2017, 2, 1, 0, 0, 7, 74, DateTimeZone.UTC), + new DateTime(2017, 2, 1, 0, 0, 11, 688, DateTimeZone.UTC), + new DateTime(2017, 2, 1, 0, 0, 11, 210, DateTimeZone.UTC), + new DateTime(2017, 2, 1, 0, 0, 11, 380, DateTimeZone.UTC)); + final DateTime startDate = datesForSecondInterval.get(0).withMillisOfSecond(0); + final Map expectedDocCount = new HashMap<>(); + expectedDocCount.put(startDate, 1); + expectedDocCount.put(startDate.plusSeconds(2), 2); + expectedDocCount.put(startDate.plusSeconds(6), 3); + + testSearchCase(DEFAULT_QUERY, datesForSecondInterval, + aggregation -> aggregation.setNumBuckets(7).field(DATE_FIELD), histogram -> { + final List buckets = histogram.getBuckets(); + assertEquals(expectedDocCount.size(), buckets.size()); + buckets.forEach(bucket -> + assertEquals(expectedDocCount.getOrDefault(bucket.getKey(), 0).longValue(), bucket.getDocCount())); + }); + testSearchAndReduceCase(DEFAULT_QUERY, datesForSecondInterval, + aggregation -> aggregation.setNumBuckets(7).field(DATE_FIELD), + histogram -> { + final List buckets = histogram.getBuckets(); + assertEquals(7, buckets.size()); + buckets.forEach(bucket -> + 
assertEquals(expectedDocCount.getOrDefault(bucket.getKey(), 0).longValue(), bucket.getDocCount())); + } ); } - private void testSearchCase(Query query, List dataset, - Consumer configure, - Consumer verify) throws IOException { + private void testSearchCase(final Query query, final List dataset, + final Consumer configure, + final Consumer verify) throws IOException { executeTestCase(false, query, dataset, configure, verify); } - private void testSearchAndReduceCase(Query query, List dataset, - Consumer configure, - Consumer verify) throws IOException { + private void testSearchAndReduceCase(final Query query, final List dataset, + final Consumer configure, + final Consumer verify) throws IOException { executeTestCase(true, query, dataset, configure, verify); } - private void testBothCases(Query query, List dataset, - Consumer configure, - Consumer verify) throws IOException { - testSearchCase(query, dataset, configure, verify); - testSearchAndReduceCase(query, dataset, configure, verify); + private void testBothCases(final Query query, final List dataset, + final Consumer configure, + final Consumer verify) throws IOException { + executeTestCase(false, query, dataset, configure, verify); + executeTestCase(true, query, dataset, configure, verify); } @Override protected IndexSettings createIndexSettings() { - Settings nodeSettings = Settings.builder() - .put("search.max_buckets", 100000).build(); + final Settings nodeSettings = Settings.builder() + .put("search.max_buckets", 25000).build(); return new IndexSettings( IndexMetaData.builder("_index").settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) .numberOfShards(1) @@ -1268,19 +702,18 @@ protected IndexSettings createIndexSettings() { ); } - private void executeTestCase(boolean reduced, Query query, List dataset, - Consumer configure, - Consumer verify) throws IOException { - + private void executeTestCase(final boolean reduced, final Query query, final List dataset, + final Consumer configure, + final Consumer verify) throws IOException { try (Directory directory = newDirectory()) { try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { - Document document = new Document(); - for (String date : dataset) { + final Document document = new Document(); + for (final DateTime date : dataset) { if (frequently()) { indexWriter.commit(); } - long instant = asLong(date); + final long instant = date.getMillis(); document.add(new SortedNumericDocValuesField(DATE_FIELD, instant)); document.add(new LongPoint(INSTANT_FIELD, instant)); indexWriter.addDocument(document); @@ -1289,19 +722,19 @@ private void executeTestCase(boolean reduced, Query query, List dataset, } try (IndexReader indexReader = DirectoryReader.open(directory)) { - IndexSearcher indexSearcher = newSearcher(indexReader, true, true); + final IndexSearcher indexSearcher = newSearcher(indexReader, true, true); - AutoDateHistogramAggregationBuilder aggregationBuilder = new AutoDateHistogramAggregationBuilder("_name"); + final AutoDateHistogramAggregationBuilder aggregationBuilder = new AutoDateHistogramAggregationBuilder("_name"); if (configure != null) { configure.accept(aggregationBuilder); } - DateFieldMapper.Builder builder = new DateFieldMapper.Builder("_name"); - DateFieldMapper.DateFieldType fieldType = builder.fieldType(); + final DateFieldMapper.Builder builder = new DateFieldMapper.Builder("_name"); + final DateFieldMapper.DateFieldType fieldType = builder.fieldType(); fieldType.setHasDocValues(true); 
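The testRandom*Intervals methods above all follow one pattern: index an evenly spaced series of dates, pick a random entry from a map of target bucket count to expected per-bucket doc count, and then spot-check a single randomly chosen bucket instead of asserting every bucket in order. A rough, self-contained sketch of the arithmetic those assertions rely on, using java.time.Instant as a stand-in for the Joda-Time DateTime and plain java.util collections instead of the ESTestCase randomFrom/randomInt helpers (the map values mirror testRandomSecondIntervals):

import java.time.Instant;
import java.time.temporal.ChronoUnit;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public class RandomIntervalSketch {
    public static void main(String[] args) {
        final int length = 120;
        final Instant start = Instant.parse("2017-01-01T00:00:00Z");
        final List<Instant> dataset = new ArrayList<>(length);
        for (int i = 0; i < length; i++) {
            dataset.add(start.plus(i, ChronoUnit.SECONDS));    // one document per second
        }
        // numBuckets hint -> docs expected in each returned bucket, as in testRandomSecondIntervals
        final Map<Integer, Integer> bucketsToExpectedDocCount = new HashMap<>();
        bucketsToExpectedDocCount.put(120, 1);
        bucketsToExpectedDocCount.put(60, 5);
        bucketsToExpectedDocCount.put(20, 10);
        bucketsToExpectedDocCount.put(10, 30);
        bucketsToExpectedDocCount.put(3, 60);
        for (final Map.Entry<Integer, Integer> entry : bucketsToExpectedDocCount.entrySet()) {
            final int intervalSeconds = entry.getValue();       // docs per bucket == seconds per bucket here
            // bucket the dataset by rounding each date down to the interval boundary
            final Map<Instant, Integer> docCounts = new TreeMap<>();
            for (final Instant date : dataset) {
                final long bucketStart = (date.getEpochSecond() / intervalSeconds) * intervalSeconds;
                docCounts.merge(Instant.ofEpochSecond(bucketStart), 1, Integer::sum);
            }
            // the two properties the refactored tests assert for one randomly chosen bucket
            final int expectedSize = length / intervalSeconds;
            if (docCounts.size() != expectedSize) {
                throw new AssertionError("expected " + expectedSize + " buckets, got " + docCounts.size());
            }
            if (!docCounts.values().stream().allMatch(count -> count == intervalSeconds)) {
                throw new AssertionError("every bucket should hold " + intervalSeconds + " docs");
            }
        }
        System.out.println("all interval choices verified");
    }
}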
fieldType.setName(aggregationBuilder.field()); - InternalAutoDateHistogram histogram; + final InternalAutoDateHistogram histogram; if (reduced) { histogram = searchAndReduce(indexSearcher, query, aggregationBuilder, fieldType); } else { @@ -1311,8 +744,4 @@ private void executeTestCase(boolean reduced, Query query, List dataset, } } } - - private static long asLong(String dateTime) { - return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parser().parseDateTime(dateTime).getMillis(); - } } diff --git a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java index 75a86831bc554..9dca64d5b831d 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java +++ b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java @@ -20,6 +20,8 @@ package org.elasticsearch.snapshots.mockstore; import com.carrotsearch.randomizedtesting.RandomizedContext; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.lucene.index.CorruptIndexException; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.metadata.MetaData; @@ -57,6 +59,7 @@ import java.util.concurrent.atomic.AtomicLong; public class MockRepository extends FsRepository { + private static final Logger logger = LogManager.getLogger(MockRepository.class); public static class Plugin extends org.elasticsearch.plugins.Plugin implements RepositoryPlugin { diff --git a/server/src/test/resources/org/elasticsearch/action/bulk/simple-bulk-missing-index-type.json b/server/src/test/resources/org/elasticsearch/action/bulk/simple-bulk-missing-index-type.json new file mode 100644 index 0000000000000..2edb45742b749 --- /dev/null +++ b/server/src/test/resources/org/elasticsearch/action/bulk/simple-bulk-missing-index-type.json @@ -0,0 +1,5 @@ +{ "index":{"_id":"1"} } +{ "field1" : "value1" } +{ "delete" : { "_id" : "2" } } +{ "create" : { "_id" : "3" } } +{ "field1" : "value3" } diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index cc230bf79d536..6a2c7780c383e 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -389,6 +389,18 @@ protected IndexShard reinitShard(IndexShard current, IndexingOperationListener.. * @param listeners new listerns to use for the newly created shard */ protected IndexShard reinitShard(IndexShard current, ShardRouting routing, IndexingOperationListener... listeners) throws IOException { + return reinitShard(current, routing, current.engineFactory, listeners); + } + + /** + * Takes an existing shard, closes it and starts a new initializing shard at the same location + * + * @param routing the shard routing to use for the newly created shard. + * @param listeners new listeners to use for the newly created shard + * @param engineFactory the engine factory for the new shard + */ + protected IndexShard reinitShard(IndexShard current, ShardRouting routing, EngineFactory engineFactory, + IndexingOperationListener... 
listeners) throws IOException { closeShards(current); return newShard( routing, @@ -396,7 +408,7 @@ protected IndexShard reinitShard(IndexShard current, ShardRouting routing, Index current.indexSettings().getIndexMetaData(), null, null, - current.engineFactory, + engineFactory, current.getGlobalCheckpointSyncer(), EMPTY_EVENT_LISTENER, listeners); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/NoOpDisruptionScheme.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/NoOpDisruptionScheme.java deleted file mode 100644 index 06bef2105edb2..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/test/disruption/NoOpDisruptionScheme.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.test.disruption; - -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.test.InternalTestCluster; - -public class NoOpDisruptionScheme implements ServiceDisruptionScheme { - - @Override - public void applyToCluster(InternalTestCluster cluster) { - - } - - @Override - public void removeFromCluster(InternalTestCluster cluster) { - - } - - @Override - public void applyToNode(String node, InternalTestCluster cluster) { - - } - - @Override - public void removeFromNode(String node, InternalTestCluster cluster) { - - } - - @Override - public void startDisrupting() { - - } - - @Override - public void stopDisrupting() { - - } - - @Override - public void testClusterClosed() { - - } - - @Override - public void removeAndEnsureHealthy(InternalTestCluster cluster) { - - } - - @Override - public TimeValue expectedTimeToHeal() { - return TimeValue.timeValueSeconds(0); - } -} diff --git a/test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java b/test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java index 69df45958947b..6a821293bf5e0 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java +++ b/test/framework/src/main/java/org/elasticsearch/test/disruption/SingleNodeDisruption.java @@ -34,12 +34,6 @@ public abstract class SingleNodeDisruption implements ServiceDisruptionScheme { protected volatile InternalTestCluster cluster; protected final Random random; - - public SingleNodeDisruption(String disruptedNode, Random random) { - this(random); - this.disruptedNode = disruptedNode; - } - public SingleNodeDisruption(Random random) { this.random = new Random(random.nextLong()); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java index 9f9e78fa2a4e9..1959081ec92ad 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java +++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java @@ -56,6 +56,7 @@ import org.elasticsearch.test.NotEqualMessageBuilder; import org.hamcrest.CoreMatchers; import org.hamcrest.Matcher; +import org.hamcrest.core.CombinableMatcher; import java.io.IOException; import java.nio.file.Files; @@ -68,6 +69,7 @@ import java.util.Locale; import java.util.Map; import java.util.Set; +import java.util.function.Function; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.is; @@ -470,6 +472,14 @@ public static Matcher hasScore(final float score) { return new ElasticsearchMatchers.SearchHitHasScoreMatcher(score); } + public static CombinableMatcher hasProperty(Function property, Matcher valueMatcher) { + return ElasticsearchMatchers.HasPropertyLambdaMatcher.hasProperty(property, valueMatcher); + } + + public static Function fieldFromSource(String fieldName) { + return (response) -> response.getSourceAsMap().get(fieldName); + } + public static T assertBooleanSubQuery(Query query, Class subqueryType, int i) { assertThat(query, instanceOf(BooleanQuery.class)); BooleanQuery q = (BooleanQuery) query; diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java index f49cc3bd39ee7..3332058648106 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java +++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java @@ -20,7 +20,12 @@ import org.elasticsearch.search.SearchHit; import org.hamcrest.Description; +import org.hamcrest.FeatureMatcher; +import org.hamcrest.Matcher; import org.hamcrest.TypeSafeMatcher; +import org.hamcrest.core.CombinableMatcher; + +import java.util.function.Function; public class ElasticsearchMatchers { @@ -115,4 +120,27 @@ public void describeTo(final Description description) { description.appendText("searchHit score should be ").appendValue(score); } } + + public static class HasPropertyLambdaMatcher extends FeatureMatcher { + + private final Function property; + + private HasPropertyLambdaMatcher(Matcher subMatcher, Function property) { + super(subMatcher, "object with", "lambda"); + this.property = property; + } + + @Override + protected V featureValueOf(T actual) { + return property.apply(actual); + } + + /** + * @param valueMatcher The matcher to apply to the property + * @param property The lambda to fetch property + */ + public static CombinableMatcher hasProperty(Function property, Matcher valueMatcher) { + return new CombinableMatcher<>(new HasPropertyLambdaMatcher<>(valueMatcher, property)); + } + } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java b/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java index 41cdaefe03575..94bab9e7d5e67 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java +++ b/test/framework/src/main/java/org/elasticsearch/test/tasks/MockTaskManager.java @@ -19,6 +19,8 @@ package org.elasticsearch.test.tasks; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.common.settings.Setting; @@ -38,6 +40,8 @@ */ public 
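The hasProperty and fieldFromSource helpers added to ElasticsearchAssertions and ElasticsearchMatchers above pair a value-extracting lambda with a Hamcrest matcher. A hypothetical fragment showing how a test might use them (the field name, value, and index name are made up; hit is an existing SearchHit, and the exact generics depend on the declared signatures, which this diff shows with type parameters stripped):

import org.elasticsearch.search.SearchHit;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.fieldFromSource;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasProperty;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;

// match on a _source field extracted via fieldFromSource; the cast keeps the value matcher Object-typed
assertThat(hit, hasProperty(fieldFromSource("user"), equalTo((Object) "kimchy")));
// any other extracting lambda works as well, e.g. matching on the hit's index name
assertThat(hit, hasProperty(SearchHit::getIndex, equalTo("test-index")));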
class MockTaskManager extends TaskManager { + private static final Logger logger = LogManager.getLogger(MockTaskManager.class); + public static final Setting USE_MOCK_TASK_MANAGER_SETTING = Setting.boolSetting("tests.mock.taskmanager.enabled", false, Property.NodeScope); diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index 73ffb7a48fb81..4a2a5d0cb4290 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -20,6 +20,8 @@ package org.elasticsearch.test.transport; import com.carrotsearch.randomizedtesting.SysGlobals; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterModule; @@ -82,6 +84,7 @@ * fake DiscoveryNode instances where the publish address is one of the bound addresses). */ public final class MockTransportService extends TransportService { + private static final Logger logger = LogManager.getLogger(MockTransportService.class); private final Map> openConnections = new HashMap<>(); private static final int JVM_ORDINAL = Integer.parseInt(System.getProperty(SysGlobals.CHILDVM_SYSPROP_JVM_ID, "0")); diff --git a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java index 6d636e557a9de..11b16b67651aa 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/MockTcpTransport.java @@ -18,6 +18,8 @@ */ package org.elasticsearch.transport; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cli.SuppressForbidden; @@ -70,6 +72,7 @@ * the networking layer in the worst possible way since it blocks and uses a thread per request model. 
*/ public class MockTcpTransport extends TcpTransport { + private static final Logger logger = LogManager.getLogger(MockTcpTransport.class); /** * A pre-built light connection profile that shares a single connection across all diff --git a/x-pack/docs/en/rest-api/security.asciidoc b/x-pack/docs/en/rest-api/security.asciidoc index 3ba582d5d7856..ffc6c0baa3d1d 100644 --- a/x-pack/docs/en/rest-api/security.asciidoc +++ b/x-pack/docs/en/rest-api/security.asciidoc @@ -77,7 +77,6 @@ include::security/create-users.asciidoc[] include::security/delete-app-privileges.asciidoc[] include::security/delete-role-mappings.asciidoc[] include::security/delete-roles.asciidoc[] -include::security/delete-tokens.asciidoc[] include::security/delete-users.asciidoc[] include::security/disable-users.asciidoc[] include::security/enable-users.asciidoc[] @@ -87,4 +86,5 @@ include::security/get-roles.asciidoc[] include::security/get-tokens.asciidoc[] include::security/get-users.asciidoc[] include::security/has-privileges.asciidoc[] +include::security/invalidate-tokens.asciidoc[] include::security/ssl.asciidoc[] diff --git a/x-pack/docs/en/rest-api/security/create-users.asciidoc b/x-pack/docs/en/rest-api/security/create-users.asciidoc index 789e8c7e80dbf..7487e7cca6735 100644 --- a/x-pack/docs/en/rest-api/security/create-users.asciidoc +++ b/x-pack/docs/en/rest-api/security/create-users.asciidoc @@ -35,6 +35,12 @@ printable symbols in the https://en.wikipedia.org/wiki/Basic_Latin_(Unicode_bloc -- +==== Query Parameters + +`refresh`:: + (string) One of `true`, `false`, or `wait_for`. + These values have the same meaning as in the <>, + but the default value for this API (Put User) is `true`. ==== Request Body @@ -52,8 +58,26 @@ The following parameters can be specified in the body of a POST or PUT request: `metadata`:: (object) Arbitrary metadata that you want to associate with the user. -`password` (required):: -(string) The user's password. Passwords must be at least 6 characters long. +`password` :: +(string) The user's password. Passwords must be at least 6 characters long. ++ +When adding a user, one of `password` or `password_hash` is required. +When updating an existing user, the password is optional, so that other +fields on the user (such as their roles) may be updated without modifying +the user's password. + +`password_hash` :: +(string) A _hash_ of the user's password. This must be produced using the +same hashing algorithm as has been configured for password storage. For more +details, see the explanation of the +`xpack.security.authc.password_hashing.algorithm` setting in +<>. ++ +Using this parameter allows the client to pre-hash the password for +performance and/or confidentiality reasons. ++ +The `password` parameter and the `password_hash` parameter cannot be +used in the same request. `roles` (required):: (list) A set of roles the user has. The roles determine the user's access diff --git a/x-pack/docs/en/rest-api/security/get-tokens.asciidoc b/x-pack/docs/en/rest-api/security/get-tokens.asciidoc index c80b4f60c6bcd..f0da5700b5a47 100644 --- a/x-pack/docs/en/rest-api/security/get-tokens.asciidoc +++ b/x-pack/docs/en/rest-api/security/get-tokens.asciidoc @@ -29,7 +29,7 @@ period is defined by the `xpack.security.authc.token.timeout` setting. For more information, see <>. If you want to invalidate a token immediately, you can do so by using the -<>. +<>. 
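For the `password_hash` parameter documented in the create users API changes above, a hypothetical request that supplies a pre-computed hash instead of a plaintext password might look like the following (the user name, role, and hash value are placeholders, and the hash must be produced with the algorithm configured via `xpack.security.authc.password_hashing.algorithm`):

[source,js]
--------------------------------------------------
POST /_xpack/security/user/jacknich
{
  "password_hash" : "<hash produced with the configured algorithm>",
  "roles" : [ "admin" ]
}
--------------------------------------------------
// NOTCONSOLE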
==== Request Body diff --git a/x-pack/docs/en/rest-api/security/delete-tokens.asciidoc b/x-pack/docs/en/rest-api/security/invalidate-tokens.asciidoc similarity index 53% rename from x-pack/docs/en/rest-api/security/delete-tokens.asciidoc rename to x-pack/docs/en/rest-api/security/invalidate-tokens.asciidoc index 7d6bae2a4c40f..915fa7c45d8a7 100644 --- a/x-pack/docs/en/rest-api/security/delete-tokens.asciidoc +++ b/x-pack/docs/en/rest-api/security/invalidate-tokens.asciidoc @@ -1,8 +1,8 @@ [role="xpack"] [[security-api-invalidate-token]] -=== Delete token API +=== Invalidate token API -Invalidates a bearer token for access without requiring basic authentication. +Invalidates an access token or a refresh token. ==== Request @@ -10,22 +10,30 @@ Invalidates a bearer token for access without requiring basic authentication. ==== Description -The tokens returned by the <> have a +The access tokens returned by the <> have a finite period of time for which they are valid and after that time period, they can no longer be used. That time period is defined by the `xpack.security.authc.token.timeout` setting. For more information, see <>. -If you want to invalidate a token immediately, use this delete token API. +The refresh tokens returned by the <> are +only valid for 24 hours. They can also be used exactly once. + +If you want to invalidate an access or refresh token immediately, use this invalidate token API. ==== Request Body The following parameters can be specified in the body of a DELETE request and -pertain to deleting a token: +pertain to invalidating a token: + +`token` (optional):: +(string) An access token. This parameter cannot be used when `refresh_token` is used. + +`refresh_token` (optional):: +(string) A refresh token. This parameter cannot be used when `token` is used. -`token` (required):: -(string) An access token. +NOTE: One of `token` or `refresh_token` parameters is required. ==== Examples @@ -40,6 +48,17 @@ DELETE /_xpack/security/oauth2/token -------------------------------------------------- // NOTCONSOLE +whereas the following example invalidates the specified refresh token immediately: + +[source,js] +-------------------------------------------------- +DELETE /_xpack/security/oauth2/token +{ + "refresh_token" : "movUJjPGRRC0PQ7+NW0eag" +} +-------------------------------------------------- +// NOTCONSOLE + A successful call returns a JSON structure that indicates whether the token has already been invalidated. diff --git a/x-pack/docs/en/security/authentication/saml-guide.asciidoc b/x-pack/docs/en/security/authentication/saml-guide.asciidoc index b0077dc1ba9d4..3c3b45b793a54 100644 --- a/x-pack/docs/en/security/authentication/saml-guide.asciidoc +++ b/x-pack/docs/en/security/authentication/saml-guide.asciidoc @@ -785,6 +785,7 @@ for `http` or `443` for `https`). These values must be aligned with the URLs used in the {es} configuration for `sp.acs` and `sp.logout`. +[[saml-kibana-basic]] ==== Supporting SAML and basic authentication in {kib} The SAML support in {kib} is designed on the expectation that it will be the diff --git a/x-pack/docs/en/watcher/actions/pagerduty.asciidoc b/x-pack/docs/en/watcher/actions/pagerduty.asciidoc index d13b722bb73ef..f7ae06ad9648d 100644 --- a/x-pack/docs/en/watcher/actions/pagerduty.asciidoc +++ b/x-pack/docs/en/watcher/actions/pagerduty.asciidoc @@ -70,7 +70,7 @@ payload as well as an array of contexts to the action. |====== | Name |Required | Description | `account` | no | The account to use, falls back to the default one. 
- The account needs a `service_key_api` attribute. + The account needs a `service_api_key` attribute. |====== diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java index 797c08cc973ee..bd22b85684ca4 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java @@ -5,6 +5,8 @@ */ package org.elasticsearch.xpack.ccr.action; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; @@ -62,6 +64,8 @@ public class ShardFollowTasksExecutor extends PersistentTasksExecutor { + private static final Logger logger = LogManager.getLogger(ShardFollowTasksExecutor.class); + private final Client client; private final ThreadPool threadPool; private final ClusterService clusterService; diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java index 5be1738941109..95f6f860b3d9b 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java @@ -808,7 +808,7 @@ private BooleanSupplier hasFollowIndexBeenClosed(String indexName) { AtomicBoolean closed = new AtomicBoolean(false); clusterService.addListener(event -> { IndexMetaData indexMetaData = event.state().metaData().index(indexName); - if (indexMetaData.getState() == IndexMetaData.State.CLOSE) { + if (indexMetaData != null && indexMetaData.getState() == IndexMetaData.State.CLOSE) { closed.set(true); } }); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java index ee1135ab53fe4..5a496a2979a5b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java @@ -158,7 +158,7 @@ private synchronized DirectoryReader getOrOpenReader() throws IOException { listeners.beforeRefresh(); } reader = DirectoryReader.open(engineConfig.getStore().directory()); - searcherFactory.processReaders(reader, null); + processReaders(reader, null); reader = lastOpenedReader = wrapReader(reader, Function.identity()); reader.getReaderCacheHelper().addClosedListener(this::onReaderClosed); for (ReferenceManager.RefreshListener listeners : config ().getInternalRefreshListener()) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java index 18ded8d585078..657d2c02e8dfa 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java @@ -5,6 +5,8 @@ */ package org.elasticsearch.license; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import 
org.elasticsearch.action.ActionListener; @@ -56,6 +58,7 @@ * the license changes are detected in the cluster state. */ public class LicenseService extends AbstractLifecycleComponent implements ClusterStateListener, SchedulerEngine.Listener { + private static final Logger logger = LogManager.getLogger(LicenseService.class); public static final Setting SELF_GENERATED_LICENSE_TYPE = new Setting<>("xpack.license.self_generated.type", (s) -> "basic", (s) -> { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java index 75b5fa05edbc7..e6cd2ed176c9c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClient.java @@ -6,13 +6,15 @@ package org.elasticsearch.xpack.core; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.Client; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.license.LicensingClient; import org.elasticsearch.protocol.xpack.XPackInfoRequest; import org.elasticsearch.protocol.xpack.XPackInfoResponse; -import org.elasticsearch.xpack.core.action.TransportFreezeIndexAction; +import org.elasticsearch.xpack.core.action.TransportFreezeIndexAction.FreezeIndexAction; +import org.elasticsearch.xpack.core.action.TransportFreezeIndexAction.FreezeRequest; +import org.elasticsearch.xpack.core.action.TransportFreezeIndexAction.FreezeResponse; import org.elasticsearch.xpack.core.action.XPackInfoAction; import org.elasticsearch.xpack.core.action.XPackInfoRequestBuilder; import org.elasticsearch.xpack.core.ccr.client.CcrClient; @@ -25,6 +27,7 @@ import java.util.Collections; import java.util.Map; import java.util.Objects; +import java.util.concurrent.ExecutionException; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.BASIC_AUTH_HEADER; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; @@ -106,7 +109,20 @@ public void info(XPackInfoRequest request, ActionListener lis client.execute(XPackInfoAction.INSTANCE, request, listener); } - public void freeze(TransportFreezeIndexAction.FreezeRequest request, ActionListener listener) { - client.execute(TransportFreezeIndexAction.FreezeIndexAction.INSTANCE, request, listener); + /** + * Freezes or unfreeze one or more indices + */ + public void freeze(FreezeRequest request, ActionListener listener) { + client.execute(FreezeIndexAction.INSTANCE, request, listener); + } + + /** + * Freeze or unfreeze one or more indices + */ + public FreezeResponse freeze(FreezeRequest request) + throws ExecutionException, InterruptedException { + PlainActionFuture future = new PlainActionFuture<>(); + freeze(request, future); + return future.get(); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index 12bc2af5e57f6..159a0889ae11c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -36,6 +36,7 @@ import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import 
org.elasticsearch.transport.Transport; +import org.elasticsearch.xpack.core.action.TransportFreezeIndexAction; import org.elasticsearch.xpack.core.action.XPackInfoAction; import org.elasticsearch.xpack.core.action.XPackUsageAction; import org.elasticsearch.xpack.core.beats.BeatsFeatureSetUsage; @@ -345,7 +346,8 @@ public List getClientActions() { ExplainLifecycleAction.INSTANCE, RemoveIndexLifecyclePolicyAction.INSTANCE, MoveToStepAction.INSTANCE, - RetryAction.INSTANCE + RetryAction.INSTANCE, + TransportFreezeIndexAction.FreezeIndexAction.INSTANCE ); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java index 4b3f0add08733..b4ff04c65154f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java @@ -63,6 +63,7 @@ import org.elasticsearch.xpack.core.action.XPackInfoAction; import org.elasticsearch.xpack.core.action.XPackUsageAction; import org.elasticsearch.xpack.core.ml.MlMetadata; +import org.elasticsearch.xpack.core.rest.action.RestFreezeIndexAction; import org.elasticsearch.xpack.core.rest.action.RestXPackInfoAction; import org.elasticsearch.xpack.core.rest.action.RestXPackUsageAction; import org.elasticsearch.xpack.core.security.authc.TokenMetaData; @@ -297,6 +298,7 @@ public List getRestHandlers(Settings settings, RestController restC List handlers = new ArrayList<>(); handlers.add(new RestXPackInfoAction(settings, restController)); handlers.add(new RestXPackUsageAction(settings, restController)); + handlers.add(new RestFreezeIndexAction(settings, restController)); handlers.addAll(licensing.getRestHandlers(settings, restController, clusterSettings, indexScopedSettings, settingsFilter, indexNameExpressionResolver, nodesInCluster)); return handlers; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportFreezeIndexAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportFreezeIndexAction.java index 6baf0a2b2800e..a3122d1ef5471 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportFreezeIndexAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/TransportFreezeIndexAction.java @@ -11,7 +11,10 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; +import org.elasticsearch.action.admin.indices.open.OpenIndexClusterStateUpdateRequest; +import org.elasticsearch.action.admin.indices.open.OpenIndexResponse; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; @@ -20,12 +23,14 @@ import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ack.OpenIndexClusterStateUpdateResponse; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetaData; import 
org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.inject.Inject; @@ -41,22 +46,27 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; +import java.util.ArrayList; +import java.util.List; import static org.elasticsearch.action.ValidateActions.addValidationError; public final class TransportFreezeIndexAction extends - TransportMasterNodeAction { + TransportMasterNodeAction { private final DestructiveOperations destructiveOperations; + private final MetaDataIndexStateService indexStateService; @Inject - public TransportFreezeIndexAction(Settings settings, TransportService transportService, ClusterService clusterService, + public TransportFreezeIndexAction(Settings settings, MetaDataIndexStateService indexStateService, TransportService transportService, + ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, DestructiveOperations destructiveOperations) { super(settings, FreezeIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, FreezeRequest::new); this.destructiveOperations = destructiveOperations; + this.indexStateService = indexStateService; } @Override protected String executor() { @@ -64,27 +74,78 @@ protected String executor() { } @Override - protected void doExecute(Task task, FreezeRequest request, ActionListener listener) { + protected void doExecute(Task task, FreezeRequest request, ActionListener listener) { destructiveOperations.failDestructive(request.indices()); super.doExecute(task, request, listener); } @Override - protected AcknowledgedResponse newResponse() { - return new AcknowledgedResponse(); + protected FreezeResponse newResponse() { + return new FreezeResponse(); } - @Override - protected void masterOperation(FreezeRequest request, ClusterState state, ActionListener listener) { - final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request); - if (concreteIndices == null || concreteIndices.length == 0) { - throw new ResourceNotFoundException("index not found"); + private Index[] resolveIndices(FreezeRequest request, ClusterState state) { + List indices = new ArrayList<>(); + for (Index index : indexNameExpressionResolver.concreteIndices(state, request)) { + IndexMetaData metaData = state.metaData().index(index); + Settings settings = metaData.getSettings(); + // only unfreeze if we are frozen and only freeze if we are not frozen already. + // this prevents all indices that are already frozen that match a pattern to + // go through the cycles again. + if ((request.freeze() && FrozenEngine.INDEX_FROZEN.get(settings) == false) || + (request.freeze() == false && FrozenEngine.INDEX_FROZEN.get(settings))) { + indices.add(index); + } } + if (indices.isEmpty() && request.indicesOptions().allowNoIndices() == false) { + throw new ResourceNotFoundException("no index found to " + (request.freeze() ? 
"freeze" : "unfreeze")); + } + return indices.toArray(Index.EMPTY_ARRAY); + } + @Override + protected void masterOperation(FreezeRequest request, ClusterState state, ActionListener listener) { + final Index[] concreteIndices = resolveIndices(request, state); + if (concreteIndices.length == 0) { + listener.onResponse(new FreezeResponse(true, true)); + return; + } clusterService.submitStateUpdateTask("toggle-frozen-settings", - new AckedClusterStateUpdateTask(Priority.URGENT, request, listener) { + new AckedClusterStateUpdateTask(Priority.URGENT, request, new ActionListener() { + @Override + public void onResponse(AcknowledgedResponse acknowledgedResponse) { + OpenIndexClusterStateUpdateRequest updateRequest = new OpenIndexClusterStateUpdateRequest() + .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()) + .indices(concreteIndices).waitForActiveShards(request.waitForActiveShards()); + indexStateService.openIndex(updateRequest, new ActionListener() { + @Override + public void onResponse(OpenIndexClusterStateUpdateResponse openIndexClusterStateUpdateResponse) { + listener.onResponse(new FreezeResponse(openIndexClusterStateUpdateResponse.isAcknowledged(), + openIndexClusterStateUpdateResponse.isShardsAcknowledged())); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }) { @Override - public ClusterState execute(final ClusterState currentState) { + public ClusterState execute(ClusterState currentState) { + List toClose = new ArrayList<>(); + for (Index index : concreteIndices) { + IndexMetaData metaData = currentState.metaData().index(index); + if (metaData.getState() != IndexMetaData.State.CLOSE) { + toClose.add(index); + } + } + currentState = indexStateService.closeIndices(currentState, toClose.toArray(new Index[0]), toClose.toString()); final MetaData.Builder builder = MetaData.builder(currentState.metaData()); ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); for (Index index : concreteIndices) { @@ -96,12 +157,13 @@ public ClusterState execute(final ClusterState currentState) { final Settings.Builder settingsBuilder = Settings.builder() .put(currentState.metaData().index(index).getSettings()) - .put("index.blocks.write", request.freeze()) .put(FrozenEngine.INDEX_FROZEN.getKey(), request.freeze()) .put(IndexSettings.INDEX_SEARCH_THROTTLED.getKey(), request.freeze()); if (request.freeze()) { + settingsBuilder.put("index.blocks.write", true); blocks.addIndexBlock(index.getName(), IndexMetaData.INDEX_WRITE_BLOCK); } else { + settingsBuilder.remove("index.blocks.write"); blocks.removeIndexBlock(index.getName(), IndexMetaData.INDEX_WRITE_BLOCK); } imdBuilder.settings(settingsBuilder); @@ -123,8 +185,17 @@ protected ClusterBlockException checkBlock(FreezeRequest request, ClusterState s indexNameExpressionResolver.concreteIndexNames(state, request)); } - public static class FreezeIndexAction extends Action { + public static class FreezeResponse extends OpenIndexResponse { + public FreezeResponse() { + super(); + } + public FreezeResponse(boolean acknowledged, boolean shardsAcknowledged) { + super(acknowledged, shardsAcknowledged); + } + } + + public static class FreezeIndexAction extends Action { public static final FreezeIndexAction INSTANCE = new FreezeIndexAction(); public static final String NAME = "indices:admin/freeze"; @@ -133,8 +204,8 @@ private FreezeIndexAction() { } @Override - public 
AcknowledgedResponse newResponse() { - return new AcknowledgedResponse(); + public FreezeResponse newResponse() { + return new FreezeResponse(); } @Override @@ -143,9 +214,9 @@ public FreezeRequestBuilder newRequestBuilder(ElasticsearchClient client) { } } - public static final class FreezeRequestBuilder extends ActionRequestBuilder { + public static final class FreezeRequestBuilder extends ActionRequestBuilder { - protected FreezeRequestBuilder(ElasticsearchClient client, Action action, + protected FreezeRequestBuilder(ElasticsearchClient client, Action action, FreezeRequest request) { super(client, action, request); } @@ -183,7 +254,8 @@ public static class FreezeRequest extends AcknowledgedRequest implements IndicesRequest.Replaceable { private String[] indices; private boolean freeze = true; - private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, false, true); + private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen(); + private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT; public FreezeRequest(String... indices) { this.indices = indices; @@ -198,8 +270,9 @@ public ActionRequestValidationException validate() { return validationException; } - public void setFreeze(boolean freeze) { + public FreezeRequest setFreeze(boolean freeze) { this.freeze = freeze; + return this; } public boolean freeze() { @@ -212,6 +285,7 @@ public void readFrom(StreamInput in) throws IOException { indicesOptions = IndicesOptions.readIndicesOptions(in); indices = in.readStringArray(); freeze = in.readBoolean(); + waitForActiveShards = ActiveShardCount.readFrom(in); } @Override @@ -220,6 +294,7 @@ public void writeTo(StreamOutput out) throws IOException { indicesOptions.writeIndicesOptions(out); out.writeStringArray(indices); out.writeBoolean(freeze); + waitForActiveShards.writeTo(out); } /** @@ -258,5 +333,28 @@ public IndicesRequest indices(String... indices) { this.indices = indices; return this; } + + public ActiveShardCount waitForActiveShards() { + return waitForActiveShards; + } + + /** + * Sets the number of shard copies that should be active for indices opening to return. + * Defaults to {@link ActiveShardCount#DEFAULT}, which will wait for one shard copy + * (the primary) to become active. Set this value to {@link ActiveShardCount#ALL} to + * wait for all shards (primary and all replicas) to be active before returning. + * Otherwise, use {@link ActiveShardCount#from(int)} to set this value to any + * non-negative integer, up to the number of copies per shard (number of replicas + 1), + * to wait for the desired amount of shard copies to become active before returning. + * Indices opening will only wait up until the timeout value for the number of shard copies + * to be active before returning. Check {@link OpenIndexResponse#isShardsAcknowledged()} to + * determine if the requisite shard copies were all started before returning or timing out. 
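// A minimal, hypothetical usage sketch of the request API added in this diff
// (illustrative only, not part of the change itself): freeze an index and wait
// for all shard copies to become active before the call returns.
FreezeRequest request = new FreezeRequest("my-index")      // index to freeze
        .setFreeze(true)                                    // false would unfreeze instead
        .waitForActiveShards(ActiveShardCount.ALL);         // wait for primaries and replicas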
+ * + * @param waitForActiveShards number of active shard copies to wait on + */ + public FreezeRequest waitForActiveShards(ActiveShardCount waitForActiveShards) { + this.waitForActiveShards = waitForActiveShards; + return this; + } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java index e4e41697bec62..27e9dcbe86c47 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java @@ -61,15 +61,11 @@ public class AnalysisConfig implements ToXContentObject, Writeable { public static final ParseField SUMMARY_COUNT_FIELD_NAME = new ParseField("summary_count_field_name"); public static final ParseField DETECTORS = new ParseField("detectors"); public static final ParseField INFLUENCERS = new ParseField("influencers"); - public static final ParseField OVERLAPPING_BUCKETS = new ParseField("overlapping_buckets"); - public static final ParseField RESULT_FINALIZATION_WINDOW = new ParseField("result_finalization_window"); public static final ParseField MULTIVARIATE_BY_FIELDS = new ParseField("multivariate_by_fields"); public static final String ML_CATEGORY_FIELD = "mlcategory"; public static final Set AUTO_CREATED_FIELDS = new HashSet<>(Collections.singletonList(ML_CATEGORY_FIELD)); - public static final long DEFAULT_RESULT_FINALIZATION_WINDOW = 2L; - // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly public static final ConstructingObjectParser LENIENT_PARSER = createParser(true); public static final ConstructingObjectParser STRICT_PARSER = createParser(false); @@ -94,8 +90,6 @@ private static ConstructingObjectParser createPars builder.setLatency(TimeValue.parseTimeValue(val, LATENCY.getPreferredName())), LATENCY); parser.declareString(Builder::setSummaryCountFieldName, SUMMARY_COUNT_FIELD_NAME); parser.declareStringArray(Builder::setInfluencers, INFLUENCERS); - parser.declareBoolean(Builder::setOverlappingBuckets, OVERLAPPING_BUCKETS); - parser.declareLong(Builder::setResultFinalizationWindow, RESULT_FINALIZATION_WINDOW); parser.declareBoolean(Builder::setMultivariateByFields, MULTIVARIATE_BY_FIELDS); return parser; @@ -112,14 +106,11 @@ private static ConstructingObjectParser createPars private final String summaryCountFieldName; private final List detectors; private final List influencers; - private final Boolean overlappingBuckets; - private final Long resultFinalizationWindow; private final Boolean multivariateByFields; private AnalysisConfig(TimeValue bucketSpan, String categorizationFieldName, List categorizationFilters, CategorizationAnalyzerConfig categorizationAnalyzerConfig, TimeValue latency, String summaryCountFieldName, - List detectors, List influencers, Boolean overlappingBuckets, Long resultFinalizationWindow, - Boolean multivariateByFields) { + List detectors, List influencers, Boolean multivariateByFields) { this.detectors = detectors; this.bucketSpan = bucketSpan; this.latency = latency; @@ -128,8 +119,6 @@ private AnalysisConfig(TimeValue bucketSpan, String categorizationFieldName, Lis this.categorizationFilters = categorizationFilters == null ? 
null : Collections.unmodifiableList(categorizationFilters); this.summaryCountFieldName = summaryCountFieldName; this.influencers = Collections.unmodifiableList(influencers); - this.overlappingBuckets = overlappingBuckets; - this.resultFinalizationWindow = resultFinalizationWindow; this.multivariateByFields = multivariateByFields; } @@ -146,8 +135,13 @@ public AnalysisConfig(StreamInput in) throws IOException { summaryCountFieldName = in.readOptionalString(); detectors = Collections.unmodifiableList(in.readList(Detector::new)); influencers = Collections.unmodifiableList(in.readList(StreamInput::readString)); - overlappingBuckets = in.readOptionalBoolean(); - resultFinalizationWindow = in.readOptionalLong(); + + // BWC for result_finalization_window and overlapping_buckets + // TODO Remove in 7.0.0 + if (in.getVersion().before(Version.V_6_6_0)) { + in.readOptionalBoolean(); + in.readOptionalLong(); + } multivariateByFields = in.readOptionalBoolean(); // BWC for removed multiple_bucket_spans @@ -185,8 +179,13 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(summaryCountFieldName); out.writeList(detectors); out.writeStringList(influencers); - out.writeOptionalBoolean(overlappingBuckets); - out.writeOptionalLong(resultFinalizationWindow); + + // BWC for result_finalization_window and overlapping_buckets + // TODO Remove in 7.0.0 + if (out.getVersion().before(Version.V_6_6_0)) { + out.writeOptionalBoolean(null); + out.writeOptionalLong(null); + } out.writeOptionalBoolean(multivariateByFields); // BWC for removed multiple_bucket_spans @@ -291,14 +290,6 @@ public Set extractReferencedFilters() { .flatMap(Set::stream).collect(Collectors.toSet()); } - public Boolean getOverlappingBuckets() { - return overlappingBuckets; - } - - public Long getResultFinalizationWindow() { - return resultFinalizationWindow; - } - public Boolean getMultivariateByFields() { return multivariateByFields; } @@ -394,12 +385,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } builder.endArray(); builder.field(INFLUENCERS.getPreferredName(), influencers); - if (overlappingBuckets != null) { - builder.field(OVERLAPPING_BUCKETS.getPreferredName(), overlappingBuckets); - } - if (resultFinalizationWindow != null) { - builder.field(RESULT_FINALIZATION_WINDOW.getPreferredName(), resultFinalizationWindow); - } if (multivariateByFields != null) { builder.field(MULTIVARIATE_BY_FIELDS.getPreferredName(), multivariateByFields); } @@ -420,8 +405,6 @@ public boolean equals(Object o) { Objects.equals(summaryCountFieldName, that.summaryCountFieldName) && Objects.equals(detectors, that.detectors) && Objects.equals(influencers, that.influencers) && - Objects.equals(overlappingBuckets, that.overlappingBuckets) && - Objects.equals(resultFinalizationWindow, that.resultFinalizationWindow) && Objects.equals(multivariateByFields, that.multivariateByFields); } @@ -429,9 +412,7 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash( bucketSpan, categorizationFieldName, categorizationFilters, categorizationAnalyzerConfig, latency, - summaryCountFieldName, detectors, influencers, overlappingBuckets, resultFinalizationWindow, - multivariateByFields - ); + summaryCountFieldName, detectors, influencers, multivariateByFields); } public static class Builder { @@ -446,8 +427,6 @@ public static class Builder { private CategorizationAnalyzerConfig categorizationAnalyzerConfig; private String summaryCountFieldName; private List influencers = new ArrayList<>(); - 
private Boolean overlappingBuckets; - private Long resultFinalizationWindow; private Boolean multivariateByFields; public Builder(List detectors) { @@ -464,8 +443,6 @@ public Builder(AnalysisConfig analysisConfig) { this.categorizationAnalyzerConfig = analysisConfig.categorizationAnalyzerConfig; this.summaryCountFieldName = analysisConfig.summaryCountFieldName; this.influencers = new ArrayList<>(analysisConfig.influencers); - this.overlappingBuckets = analysisConfig.overlappingBuckets; - this.resultFinalizationWindow = analysisConfig.resultFinalizationWindow; this.multivariateByFields = analysisConfig.multivariateByFields; } @@ -517,14 +494,6 @@ public void setInfluencers(List influencers) { this.influencers = ExceptionsHelper.requireNonNull(influencers, INFLUENCERS.getPreferredName()); } - public void setOverlappingBuckets(Boolean overlappingBuckets) { - this.overlappingBuckets = overlappingBuckets; - } - - public void setResultFinalizationWindow(Long resultFinalizationWindow) { - this.resultFinalizationWindow = resultFinalizationWindow; - } - public void setMultivariateByFields(Boolean multivariateByFields) { this.multivariateByFields = multivariateByFields; } @@ -536,7 +505,6 @@ public void setMultivariateByFields(Boolean multivariateByFields) { *

 * Check that if non-null Latency is <= MAX_LATENCY
 * Check there is at least one detector configured
 * Check all the detectors are configured correctly
- * Check that OVERLAPPING_BUCKETS is set appropriately
 * Check that MULTIPLE_BUCKETSPANS are set appropriately
 * If Per Partition normalization is configured at least one detector
 * must have a partition field and no influences can be used
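The hunks below trim the builder down to the remaining settings. As a quick illustration, this is a minimal, hypothetical sketch (not taken from the diff) of how an analysis config is assembled after this change, following the pattern used in the integration tests further below:

    import java.util.Arrays;
    import java.util.Collections;

    import org.elasticsearch.common.unit.TimeValue;
    import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig;
    import org.elasticsearch.xpack.core.ml.job.config.Detector;

    class AnalysisConfigExample {
        static AnalysisConfig build() {
            // One detector, as in the integration tests touched by this change.
            Detector detector = new Detector.Builder("max", "value").build();
            AnalysisConfig.Builder analysisConfig =
                    new AnalysisConfig.Builder(Collections.singletonList(detector));
            analysisConfig.setBucketSpan(TimeValue.timeValueMinutes(15));
            analysisConfig.setInfluencers(Arrays.asList("host"));
            // build() runs the validation steps listed in the javadoc above;
            // the overlapping_buckets and result_finalization_window checks are gone.
            return analysisConfig.build();
        }
    }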
  • @@ -555,17 +523,13 @@ public AnalysisConfig build() { verifyMlCategoryIsUsedWhenCategorizationFieldNameIsSet(); verifyCategorizationAnalyzer(); verifyCategorizationFilters(); - checkFieldIsNotNegativeIfSpecified(RESULT_FINALIZATION_WINDOW.getPreferredName(), resultFinalizationWindow); verifyNoMetricFunctionsWhenSummaryCountFieldNameIsSet(); - overlappingBuckets = verifyOverlappingBucketsConfig(overlappingBuckets, detectors); - verifyNoInconsistentNestedFieldNames(); return new AnalysisConfig(bucketSpan, categorizationFieldName, categorizationFilters, categorizationAnalyzerConfig, - latency, summaryCountFieldName, detectors, influencers, overlappingBuckets, - resultFinalizationWindow, multivariateByFields); + latency, summaryCountFieldName, detectors, influencers, multivariateByFields); } private void verifyNoMetricFunctionsWhenSummaryCountFieldNameIsSet() { @@ -576,13 +540,6 @@ private void verifyNoMetricFunctionsWhenSummaryCountFieldNameIsSet() { } } - private static void checkFieldIsNotNegativeIfSpecified(String fieldName, Long value) { - if (value != null && value < 0) { - String msg = Messages.getMessage(Messages.JOB_CONFIG_FIELD_VALUE_TOO_LOW, fieldName, 0, value); - throw ExceptionsHelper.badRequestException(msg); - } - } - private void verifyDetectorAreDefined() { if (detectors == null || detectors.isEmpty()) { throw ExceptionsHelper.badRequestException(Messages.getMessage(Messages.JOB_CONFIG_NO_DETECTORS)); @@ -697,25 +654,5 @@ private static boolean isValidRegex(String exp) { return false; } } - - private static Boolean verifyOverlappingBucketsConfig(Boolean overlappingBuckets, List detectors) { - // If any detector function is rare/freq_rare, mustn't use overlapping buckets - boolean mustNotUse = false; - - List illegalFunctions = new ArrayList<>(); - for (Detector d : detectors) { - if (Detector.NO_OVERLAPPING_BUCKETS_FUNCTIONS.contains(d.getFunction())) { - illegalFunctions.add(d.getFunction()); - mustNotUse = true; - } - } - - if (Boolean.TRUE.equals(overlappingBuckets) && mustNotUse) { - throw ExceptionsHelper.badRequestException( - Messages.getMessage(Messages.JOB_CONFIG_OVERLAPPING_BUCKETS_INCOMPATIBLE_FUNCTION, illegalFunctions.toString())); - } - - return overlappingBuckets; - } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java index 9b42c6078b08a..727500328db97 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/Detector.java @@ -185,25 +185,6 @@ private static ObjectParser createParser(boolean ignoreUnknownFie DetectorFunction.HIGH_NON_ZERO_COUNT ); - /** - * The set of functions that must not be used with overlapping buckets - */ - public static final EnumSet NO_OVERLAPPING_BUCKETS_FUNCTIONS = EnumSet.of( - DetectorFunction.RARE, - DetectorFunction.FREQ_RARE - ); - - /** - * The set of functions that should not be used with overlapping buckets - * as they gain no benefit but have overhead - */ - public static final EnumSet OVERLAPPING_BUCKETS_FUNCTIONS_NOT_NEEDED = EnumSet.of( - DetectorFunction.MIN, - DetectorFunction.MAX, - DetectorFunction.TIME_OF_DAY, - DetectorFunction.TIME_OF_WEEK - ); - /** * Functions that do not support rule conditions: *
      diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java index 24c68eb98de21..4039d0be59e0f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/messages/Messages.java @@ -23,11 +23,9 @@ public final class Messages { "script_fields cannot be used in combination with aggregations"; public static final String DATAFEED_CONFIG_INVALID_OPTION_VALUE = "Invalid {0} value ''{1}'' in datafeed configuration"; public static final String DATAFEED_CONFIG_DELAYED_DATA_CHECK_TOO_SMALL = - "delayed_data_check_window [{0}] must be greater than the bucket_span [{1}]"; - public static final String DATAFEED_CONFIG_DELAYED_DATA_CHECK_TOO_LARGE = - "delayed_data_check_window [{0}] must be less than or equal to [24h]"; + "delayed_data_check_config: check_window [{0}] must be greater than the bucket_span [{1}]"; public static final String DATAFEED_CONFIG_DELAYED_DATA_CHECK_SPANS_TOO_MANY_BUCKETS = - "delayed_data_check_window [{0}] must be less than 10,000x the bucket_span [{1}]"; + "delayed_data_check_config: check_window [{0}] must be less than 10,000x the bucket_span [{1}]"; public static final String DATAFEED_DOES_NOT_SUPPORT_JOB_WITH_LATENCY = "A job configured with datafeed cannot support latency"; public static final String DATAFEED_NOT_FOUND = "No datafeed with id [{0}] exists"; @@ -142,8 +140,6 @@ public final class Messages { public static final String JOB_CONFIG_NO_DETECTORS = "No detectors configured"; public static final String JOB_CONFIG_OVERFIELD_INCOMPATIBLE_FUNCTION = "over_field_name cannot be used with function ''{0}''"; - public static final String JOB_CONFIG_OVERLAPPING_BUCKETS_INCOMPATIBLE_FUNCTION = - "Overlapping buckets cannot be used with function ''{0}''"; public static final String JOB_CONFIG_UNKNOWN_FUNCTION = "Unknown function ''{0}''"; public static final String JOB_CONFIG_UPDATE_ANALYSIS_LIMITS_MODEL_MEMORY_LIMIT_CANNOT_BE_DECREASED = "Invalid update value for analysis_limits: model_memory_limit cannot be decreased below current usage; " + diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java index 7308ef1cc30c3..1b314a4a2f3cb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java @@ -225,12 +225,6 @@ public static void addJobConfigFields(XContentBuilder builder) throws IOExceptio .startObject(AnalysisConfig.INFLUENCERS.getPreferredName()) .field(TYPE, KEYWORD) .endObject() - .startObject(AnalysisConfig.OVERLAPPING_BUCKETS.getPreferredName()) - .field(TYPE, BOOLEAN) - .endObject() - .startObject(AnalysisConfig.RESULT_FINALIZATION_WINDOW.getPreferredName()) - .field(TYPE, LONG) // TODO This should be made a time value - .endObject() .startObject(AnalysisConfig.MULTIVARIATE_BY_FIELDS.getPreferredName()) .field(TYPE, BOOLEAN) .endObject() diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java index 
fa0532f382b96..4512ab1a974ab 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ReservedFieldNames.java @@ -207,8 +207,6 @@ public final class ReservedFieldNames { AnalysisConfig.SUMMARY_COUNT_FIELD_NAME.getPreferredName(), AnalysisConfig.DETECTORS.getPreferredName(), AnalysisConfig.INFLUENCERS.getPreferredName(), - AnalysisConfig.OVERLAPPING_BUCKETS.getPreferredName(), - AnalysisConfig.RESULT_FINALIZATION_WINDOW.getPreferredName(), AnalysisConfig.MULTIVARIATE_BY_FIELDS.getPreferredName(), AnalysisLimits.MODEL_MEMORY_LIMIT.getPreferredName(), diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestFreezeIndexAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestFreezeIndexAction.java new file mode 100644 index 0000000000000..9604cdd8b3183 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rest/action/RestFreezeIndexAction.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.rest.action; + +import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.XPackClient; +import org.elasticsearch.xpack.core.action.TransportFreezeIndexAction; +import org.elasticsearch.xpack.core.rest.XPackRestHandler; + +public final class RestFreezeIndexAction extends XPackRestHandler { + public RestFreezeIndexAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.POST, "/{index}/_freeze", this); + controller.registerHandler(RestRequest.Method.POST, "/{index}/_unfreeze", this); + } + + @Override + protected RestChannelConsumer doPrepareRequest(RestRequest request, XPackClient client) { + boolean freeze = request.path().endsWith("/_freeze"); + TransportFreezeIndexAction.FreezeRequest freezeRequest = + new TransportFreezeIndexAction.FreezeRequest(Strings.splitStringByCommaToArray(request.param("index"))); + freezeRequest.timeout(request.paramAsTime("timeout", freezeRequest.timeout())); + freezeRequest.masterNodeTimeout(request.paramAsTime("master_timeout", freezeRequest.masterNodeTimeout())); + freezeRequest.indicesOptions(IndicesOptions.fromRequest(request, freezeRequest.indicesOptions())); + String waitForActiveShards = request.param("wait_for_active_shards"); + if (waitForActiveShards != null) { + freezeRequest.waitForActiveShards(ActiveShardCount.parseString(waitForActiveShards)); + } + freezeRequest.setFreeze(freeze); + return channel -> client.freeze(freezeRequest, new RestToXContentListener<>(channel)); + } + + @Override + public String getName() { + return "freeze_index"; + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/netty4/SecurityNetty4Transport.java 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/netty4/SecurityNetty4Transport.java index 3bc405103f155..07a9cd2633d5a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/netty4/SecurityNetty4Transport.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/transport/netty4/SecurityNetty4Transport.java @@ -11,6 +11,8 @@ import io.netty.channel.ChannelOutboundHandlerAdapter; import io.netty.channel.ChannelPromise; import io.netty.handler.ssl.SslHandler; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -47,6 +49,7 @@ * Implementation of a transport that extends the {@link Netty4Transport} to add SSL and IP Filtering */ public class SecurityNetty4Transport extends Netty4Transport { + private static final Logger logger = LogManager.getLogger(SecurityNetty4Transport.class); private final SSLService sslService; private final SSLConfiguration sslConfiguration; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexRecoveryTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexRecoveryTests.java new file mode 100644 index 0000000000000..b67258dd9b3d0 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexRecoveryTests.java @@ -0,0 +1,43 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.index.engine; + +import org.elasticsearch.cluster.routing.RecoverySource; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingHelper; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; + +public class FrozenIndexRecoveryTests extends IndexShardTestCase { + + /** + * Make sure we can recover from a frozen engine + */ + public void testRecoverFromFrozenPrimary() throws IOException { + IndexShard indexShard = newStartedShard(true); + indexDoc(indexShard, "_doc", "1"); + indexDoc(indexShard, "_doc", "2"); + indexDoc(indexShard, "_doc", "3"); + indexShard.close("test", true); + final ShardRouting shardRouting = indexShard.routingEntry(); + IndexShard frozenShard = reinitShard(indexShard, ShardRoutingHelper.initWithSameId(shardRouting, + shardRouting.primary() ? 
RecoverySource.ExistingStoreRecoverySource.INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE + ), FrozenEngine::new); + recoverShardFromStore(frozenShard); + assertThat(frozenShard.getMaxSeqNoOfUpdatesOrDeletes(), equalTo(frozenShard.seqNoStats().getMaxSeqNo())); + assertDocCount(frozenShard, 3); + + IndexShard replica = newShard(false, Settings.EMPTY, FrozenEngine::new); + recoverReplica(replica, frozenShard, true); + assertDocCount(replica, 3); + closeShards(frozenShard, replica); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java index 637a5315d2e8e..884dafdcd395b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/FrozenIndexTests.java @@ -6,14 +6,13 @@ package org.elasticsearch.index.engine; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; -import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -39,6 +38,7 @@ import java.io.IOException; import java.util.Collection; +import java.util.EnumSet; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; @@ -58,14 +58,8 @@ public void testCloseFreezeAndOpen() throws ExecutionException, InterruptedExcep client().prepareIndex("index", "_doc", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); client().prepareIndex("index", "_doc", "2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); client().prepareIndex("index", "_doc", "3").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - - client().admin().indices().prepareFlush("index").get(); - client().admin().indices().prepareClose("index").get(); XPackClient xPackClient = new XPackClient(client()); - PlainActionFuture future = new PlainActionFuture<>(); - xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("index"), future); - assertAcked(future.get()); - assertAcked(client().admin().indices().prepareOpen("index").setWaitForActiveShards(ActiveShardCount.DEFAULT)); + assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("index"))); expectThrows(ClusterBlockException.class, () -> client().prepareIndex("index", "_doc", "4").setSource("field", "value") .setRefreshPolicy(IMMEDIATE).get()); IndicesService indexServices = getInstanceFromNode(IndicesService.class); @@ -101,7 +95,7 @@ public void testCloseFreezeAndOpen() throws ExecutionException, InterruptedExcep } while (searchResponse.getHits().getHits().length > 0); } - public void testSearchAndGetAPIsAreThrottled() throws ExecutionException, InterruptedException, IOException { + public void testSearchAndGetAPIsAreThrottled() throws InterruptedException, IOException, ExecutionException { 
XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("_doc") .startObject("properties").startObject("field").field("type", "text").field("term_vector", "with_positions_offsets_payloads") .endObject().endObject() @@ -110,15 +104,8 @@ public void testSearchAndGetAPIsAreThrottled() throws ExecutionException, Interr for (int i = 0; i < 10; i++) { client().prepareIndex("index", "_doc", "" + i).setSource("field", "foo bar baz").get(); } - client().admin().indices().prepareFlush("index").get(); - client().admin().indices().prepareClose("index").get(); XPackClient xPackClient = new XPackClient(client()); - PlainActionFuture future = new PlainActionFuture<>(); - TransportFreezeIndexAction.FreezeRequest request = - new TransportFreezeIndexAction.FreezeRequest("index"); - xPackClient.freeze(request, future); - assertAcked(future.get()); - assertAcked(client().admin().indices().prepareOpen("index").setWaitForActiveShards(ActiveShardCount.DEFAULT)); + assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("index"))); int numRequests = randomIntBetween(20, 50); CountDownLatch latch = new CountDownLatch(numRequests); ActionListener listener = ActionListener.wrap(latch::countDown); @@ -152,21 +139,17 @@ public void testSearchAndGetAPIsAreThrottled() throws ExecutionException, Interr assertEquals(numRefreshes, index.getTotal().refresh.getTotal()); } - public void testFreezeAndUnfreeze() throws ExecutionException, InterruptedException { + public void testFreezeAndUnfreeze() throws InterruptedException, ExecutionException { createIndex("index", Settings.builder().put("index.number_of_shards", 2).build()); client().prepareIndex("index", "_doc", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); client().prepareIndex("index", "_doc", "2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); client().prepareIndex("index", "_doc", "3").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); - - client().admin().indices().prepareFlush("index").get(); - client().admin().indices().prepareClose("index").get(); + if (randomBoolean()) { + // sometimes close it + assertAcked(client().admin().indices().prepareClose("index").get()); + } XPackClient xPackClient = new XPackClient(client()); - PlainActionFuture future = new PlainActionFuture<>(); - TransportFreezeIndexAction.FreezeRequest request = - new TransportFreezeIndexAction.FreezeRequest("index"); - xPackClient.freeze(request, future); - assertAcked(future.get()); - assertAcked(client().admin().indices().prepareOpen("index").setWaitForActiveShards(ActiveShardCount.DEFAULT)); + assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("index"))); { IndicesService indexServices = getInstanceFromNode(IndicesService.class); Index index = resolveIndex("index"); @@ -175,12 +158,7 @@ public void testFreezeAndUnfreeze() throws ExecutionException, InterruptedExcept IndexShard shard = indexService.getShard(0); assertEquals(0, shard.refreshStats().getTotal()); } - client().admin().indices().prepareClose("index").get(); - request.setFreeze(false); - PlainActionFuture future1= new PlainActionFuture<>(); - xPackClient.freeze(request, future1); - assertAcked(future1.get()); - assertAcked(client().admin().indices().prepareOpen("index").setWaitForActiveShards(ActiveShardCount.DEFAULT)); + assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("index").setFreeze(false))); { IndicesService indexServices = getInstanceFromNode(IndicesService.class); Index index = 
resolveIndex("index"); @@ -193,16 +171,63 @@ public void testFreezeAndUnfreeze() throws ExecutionException, InterruptedExcept client().prepareIndex("index", "_doc", "4").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); } - public void testIndexMustBeClosed() { + private void assertIndexFrozen(String idx) { + IndicesService indexServices = getInstanceFromNode(IndicesService.class); + Index index = resolveIndex(idx); + IndexService indexService = indexServices.indexServiceSafe(index); + assertTrue(indexService.getIndexSettings().isSearchThrottled()); + assertTrue(FrozenEngine.INDEX_FROZEN.get(indexService.getIndexSettings().getSettings())); + } + + public void testDoubleFreeze() throws ExecutionException, InterruptedException { createIndex("test-idx", Settings.builder().put("index.number_of_shards", 2).build()); XPackClient xPackClient = new XPackClient(client()); - PlainActionFuture future = new PlainActionFuture<>(); - TransportFreezeIndexAction.FreezeRequest request = - new TransportFreezeIndexAction.FreezeRequest("test-idx"); - xPackClient.freeze(request, future); - ExecutionException executionException = expectThrows(ExecutionException.class, () -> future.get()); - assertThat(executionException.getCause(), Matchers.instanceOf(IllegalStateException.class)); - assertEquals("index [test-idx] is not closed", executionException.getCause().getMessage()); + assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("test-idx"))); + ExecutionException executionException = expectThrows(ExecutionException.class, + () -> xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("test-idx") + .indicesOptions(new IndicesOptions(EnumSet.noneOf(IndicesOptions.Option.class), + EnumSet.of(IndicesOptions.WildcardStates.OPEN))))); + assertEquals("no index found to freeze", executionException.getCause().getMessage()); + } + + public void testUnfreezeClosedIndices() throws ExecutionException, InterruptedException { + createIndex("idx", Settings.builder().put("index.number_of_shards", 1).build()); + client().prepareIndex("idx", "_doc", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + createIndex("idx-closed", Settings.builder().put("index.number_of_shards", 1).build()); + client().prepareIndex("idx-closed", "_doc", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + XPackClient xPackClient = new XPackClient(client()); + assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("idx"))); + assertAcked(client().admin().indices().prepareClose("idx-closed").get()); + assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("idx*").setFreeze(false) + .indicesOptions(IndicesOptions.strictExpand()))); + ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get(); + assertEquals(IndexMetaData.State.CLOSE, stateResponse.getState().getMetaData().index("idx-closed").getState()); + assertEquals(IndexMetaData.State.OPEN, stateResponse.getState().getMetaData().index("idx").getState()); + assertHitCount(client().prepareSearch().get(), 1L); + } + + public void testFreezePattern() throws ExecutionException, InterruptedException { + createIndex("test-idx", Settings.builder().put("index.number_of_shards", 1).build()); + client().prepareIndex("test-idx", "_doc", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + createIndex("test-idx-1", Settings.builder().put("index.number_of_shards", 1).build()); + client().prepareIndex("test-idx-1", "_doc", "1").setSource("field", 
"value").setRefreshPolicy(IMMEDIATE).get(); + XPackClient xPackClient = new XPackClient(client()); + assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("test-idx"))); + assertIndexFrozen("test-idx"); + + IndicesStatsResponse index = client().admin().indices().prepareStats("test-idx").clear().setRefresh(true).get(); + assertEquals(0, index.getTotal().refresh.getTotal()); + assertHitCount(client().prepareSearch("test-idx").setIndicesOptions(IndicesOptions.STRICT_EXPAND_OPEN_FORBID_CLOSED).get(), 1); + index = client().admin().indices().prepareStats("test-idx").clear().setRefresh(true).get(); + assertEquals(1, index.getTotal().refresh.getTotal()); + + assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("test*"))); + assertIndexFrozen("test-idx"); + assertIndexFrozen("test-idx-1"); + index = client().admin().indices().prepareStats("test-idx").clear().setRefresh(true).get(); + assertEquals(1, index.getTotal().refresh.getTotal()); + index = client().admin().indices().prepareStats("test-idx-1").clear().setRefresh(true).get(); + assertEquals(0, index.getTotal().refresh.getTotal()); } public void testCanMatch() throws ExecutionException, InterruptedException, IOException { @@ -232,15 +257,9 @@ public void testCanMatch() throws ExecutionException, InterruptedException, IOEx Strings.EMPTY_ARRAY, false, new AliasFilter(null, Strings.EMPTY_ARRAY), 1f, true, null, null))); } - client().admin().indices().prepareFlush("index").get(); - client().admin().indices().prepareClose("index").get(); + XPackClient xPackClient = new XPackClient(client()); - PlainActionFuture future = new PlainActionFuture<>(); - TransportFreezeIndexAction.FreezeRequest request = - new TransportFreezeIndexAction.FreezeRequest("index"); - xPackClient.freeze(request, future); - assertAcked(future.get()); - assertAcked(client().admin().indices().prepareOpen("index").setWaitForActiveShards(ActiveShardCount.DEFAULT)); + assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("index"))); { IndicesService indexServices = getInstanceFromNode(IndicesService.class); @@ -266,4 +285,43 @@ public void testCanMatch() throws ExecutionException, InterruptedException, IOEx assertEquals(0, response.getTotal().refresh.getTotal()); // never opened a reader } } + + public void testWriteToFrozenIndex() throws ExecutionException, InterruptedException { + createIndex("idx", Settings.builder().put("index.number_of_shards", 1).build()); + client().prepareIndex("idx", "_doc", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); + XPackClient xPackClient = new XPackClient(client()); + assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("idx"))); + assertIndexFrozen("idx"); + expectThrows(ClusterBlockException.class, () -> + client().prepareIndex("idx", "_doc", "2").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get()); + } + + public void testIgnoreUnavailable() throws ExecutionException, InterruptedException { + createIndex("idx", Settings.builder().put("index.number_of_shards", 1).build()); + createIndex("idx-close", Settings.builder().put("index.number_of_shards", 1).build()); + assertAcked(client().admin().indices().prepareClose("idx-close")); + XPackClient xPackClient = new XPackClient(client()); + assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("idx*", "not_available") + .indicesOptions(IndicesOptions.fromParameters(null, "true", null, null, IndicesOptions.strictExpandOpen())))); + assertIndexFrozen("idx"); + 
assertEquals(IndexMetaData.State.CLOSE, + client().admin().cluster().prepareState().get().getState().metaData().index("idx-close").getState()); + } + + public void testUnfreezeClosedIndex() throws ExecutionException, InterruptedException { + createIndex("idx", Settings.builder().put("index.number_of_shards", 1).build()); + XPackClient xPackClient = new XPackClient(client()); + assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("idx"))); + assertAcked(client().admin().indices().prepareClose("idx")); + assertEquals(IndexMetaData.State.CLOSE, + client().admin().cluster().prepareState().get().getState().metaData().index("idx").getState()); + expectThrows(ExecutionException.class, + () -> xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("id*").setFreeze(false) + .indicesOptions(new IndicesOptions(EnumSet.noneOf(IndicesOptions.Option.class), + EnumSet.of(IndicesOptions.WildcardStates.OPEN))))); + // we don't resolve to closed indices + assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest("idx").setFreeze(false))); + assertEquals(IndexMetaData.State.OPEN, + client().admin().cluster().prepareState().get().getState().metaData().index("idx").getState()); + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfigTests.java index c95403a112d58..0e38be0c04df1 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfigTests.java @@ -90,12 +90,6 @@ public static AnalysisConfig.Builder createRandomized() { if (randomBoolean()) { builder.setMultivariateByFields(randomBoolean()); } - if (randomBoolean()) { - builder.setOverlappingBuckets(randomBoolean()); - } - if (randomBoolean()) { - builder.setResultFinalizationWindow(randomNonNegativeLong()); - } builder.setInfluencers(Arrays.asList(generateRandomStringArray(10, 10, false))); @@ -564,94 +558,6 @@ public void testVerify_GivenFieldIsControlField() { RecordWriter.CONTROL_FIELD_NAME), e.getMessage()); } - public void testVerify_OverlappingBuckets() { - List detectors; - Detector detector; - - boolean onByDefault = false; - - // Uncomment this when overlappingBuckets turned on by default - if (onByDefault) { - // Test overlappingBuckets unset - AnalysisConfig.Builder analysisConfig = createValidConfig(); - analysisConfig.setBucketSpan(TimeValue.timeValueSeconds(5000L)); - detectors = new ArrayList<>(); - detector = new Detector.Builder("count", null).build(); - detectors.add(detector); - detector = new Detector.Builder("mean", "value").build(); - detectors.add(detector); - analysisConfig.setDetectors(detectors); - AnalysisConfig ac = analysisConfig.build(); - assertTrue(ac.getOverlappingBuckets()); - - // Test overlappingBuckets unset - analysisConfig = createValidConfig(); - analysisConfig.setBucketSpan(TimeValue.timeValueSeconds(5000L)); - detectors = new ArrayList<>(); - detector = new Detector.Builder("count", null).build(); - detectors.add(detector); - detector = new Detector.Builder("rare", "value").build(); - detectors.add(detector); - analysisConfig.setDetectors(detectors); - ac = analysisConfig.build(); - assertFalse(ac.getOverlappingBuckets()); - - // Test overlappingBuckets unset - analysisConfig = createValidConfig(); - analysisConfig.setBucketSpan(TimeValue.timeValueSeconds(5000L)); - 
detectors = new ArrayList<>(); - detector = new Detector.Builder("count", null).build(); - detectors.add(detector); - detector = new Detector.Builder("min", "value").build(); - detectors.add(detector); - detector = new Detector.Builder("max", "value").build(); - detectors.add(detector); - analysisConfig.setDetectors(detectors); - ac = analysisConfig.build(); - assertFalse(ac.getOverlappingBuckets()); - } - - // Test overlappingBuckets set - AnalysisConfig.Builder analysisConfig = createValidConfig(); - analysisConfig.setBucketSpan(TimeValue.timeValueSeconds(5000L)); - detectors = new ArrayList<>(); - detector = new Detector.Builder("count", null).build(); - detectors.add(detector); - Detector.Builder builder = new Detector.Builder("rare", null); - builder.setByFieldName("value"); - detectors.add(builder.build()); - analysisConfig.setOverlappingBuckets(false); - analysisConfig.setDetectors(detectors); - assertFalse(analysisConfig.build().getOverlappingBuckets()); - - // Test overlappingBuckets set - analysisConfig = createValidConfig(); - analysisConfig.setBucketSpan(TimeValue.timeValueSeconds(5000L)); - analysisConfig.setOverlappingBuckets(true); - detectors = new ArrayList<>(); - detector = new Detector.Builder("count", null).build(); - detectors.add(detector); - builder = new Detector.Builder("rare", null); - builder.setByFieldName("value"); - detectors.add(builder.build()); - analysisConfig.setDetectors(detectors); - ElasticsearchException e = ESTestCase.expectThrows(ElasticsearchException.class, analysisConfig::build); - assertEquals("Overlapping buckets cannot be used with function '[rare]'", e.getMessage()); - - // Test overlappingBuckets set - analysisConfig = createValidConfig(); - analysisConfig.setBucketSpan(TimeValue.timeValueSeconds(5000L)); - analysisConfig.setOverlappingBuckets(false); - detectors = new ArrayList<>(); - detector = new Detector.Builder("count", null).build(); - detectors.add(detector); - detector = new Detector.Builder("mean", "value").build(); - detectors.add(detector); - analysisConfig.setDetectors(detectors); - AnalysisConfig ac = analysisConfig.build(); - assertFalse(ac.getOverlappingBuckets()); - } - public void testVerify_GivenMetricAndSummaryCountField() { Detector d = new Detector.Builder("metric", "my_metric").build(); AnalysisConfig.Builder ac = new AnalysisConfig.Builder(Collections.singletonList(d)); @@ -728,7 +634,7 @@ private static AnalysisConfig.Builder createValidCategorizationConfig() { @Override protected AnalysisConfig mutateInstance(AnalysisConfig instance) { AnalysisConfig.Builder builder = new AnalysisConfig.Builder(instance); - switch (between(0, 10)) { + switch (between(0, 8)) { case 0: List detectors = new ArrayList<>(instance.getDetectors()); Detector.Builder detector = new Detector.Builder(); @@ -806,20 +712,6 @@ protected AnalysisConfig mutateInstance(AnalysisConfig instance) { builder.setInfluencers(influencers); break; case 8: - if (instance.getOverlappingBuckets() == null) { - builder.setOverlappingBuckets(randomBoolean()); - } else { - builder.setOverlappingBuckets(instance.getOverlappingBuckets() == false); - } - break; - case 9: - if (instance.getResultFinalizationWindow() == null) { - builder.setResultFinalizationWindow(between(1, 100) * 1000L); - } else { - builder.setResultFinalizationWindow(instance.getResultFinalizationWindow() + (between(1, 100) * 1000)); - } - break; - case 10: if (instance.getMultivariateByFields() == null) { builder.setMultivariateByFields(randomBoolean()); } else { diff --git 
a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/UpdateInterimResultsIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/UpdateInterimResultsIT.java index 3d5533fed08ed..4cbeaf1dc482c 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/UpdateInterimResultsIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/test/java/org/elasticsearch/xpack/ml/integration/UpdateInterimResultsIT.java @@ -43,7 +43,6 @@ public void test() throws Exception { AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder( Collections.singletonList(new Detector.Builder("max", "value").build())); analysisConfig.setBucketSpan(TimeValue.timeValueSeconds(BUCKET_SPAN_SECONDS)); - analysisConfig.setOverlappingBuckets(true); DataDescription.Builder dataDescription = new DataDescription.Builder(); dataDescription.setTimeFormat("epoch"); Job.Builder job = new Job.Builder(JOB_ID); @@ -77,10 +76,9 @@ public void test() throws Exception { // We might need to retry this while waiting for a refresh assertBusy(() -> { List firstInterimBuckets = getInterimResults(job.getId()); - assertThat("interim buckets were: " + firstInterimBuckets, firstInterimBuckets.size(), equalTo(2)); - assertThat(firstInterimBuckets.get(0).getTimestamp().getTime(), equalTo(1400039000000L)); - assertThat(firstInterimBuckets.get(1).getTimestamp().getTime(), equalTo(1400040000000L)); - assertThat(firstInterimBuckets.get(1).getRecords().get(0).getActual().get(0), equalTo(16.0)); + assertThat("interim buckets were: " + firstInterimBuckets, firstInterimBuckets.size(), equalTo(1)); + assertThat(firstInterimBuckets.get(0).getTimestamp().getTime(), equalTo(1400040000000L)); + assertThat(firstInterimBuckets.get(0).getRecords().get(0).getActual().get(0), equalTo(16.0)); }); // push 1 more record, flush (with interim), check same interim result @@ -90,9 +88,7 @@ public void test() throws Exception { assertBusy(() -> { List secondInterimBuckets = getInterimResults(job.getId()); - assertThat(secondInterimBuckets.get(0).getTimestamp().getTime(), equalTo(1400039000000L)); - assertThat(secondInterimBuckets.get(1).getTimestamp().getTime(), equalTo(1400040000000L)); - assertThat(secondInterimBuckets.get(1).getRecords().get(0).getActual().get(0), equalTo(16.0)); + assertThat(secondInterimBuckets.get(0).getTimestamp().getTime(), equalTo(1400040000000L)); }); // push rest of data, close, verify no interim results @@ -103,11 +99,11 @@ public void test() throws Exception { // Verify interim results have been replaced with finalized results GetBucketsAction.Request bucketRequest = new GetBucketsAction.Request(job.getId()); - bucketRequest.setTimestamp("1400039500000"); + bucketRequest.setTimestamp("1400040000000"); bucketRequest.setExpand(true); List bucket = client().execute(GetBucketsAction.INSTANCE, bucketRequest).get().getBuckets().results(); assertThat(bucket.size(), equalTo(1)); - assertThat(bucket.get(0).getRecords().get(0).getActual().get(0), equalTo(14.0)); + assertThat(bucket.get(0).getRecords().get(0).getActual().get(0), equalTo(16.0)); } private String createData(int halfBuckets) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java index 7f84e5d96c85a..1d7623e69e4de 100644 --- 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java @@ -786,6 +786,8 @@ private void addDocMappingIfMissing(String alias, CheckedSupplier { + private static final Logger logger = LogManager.getLogger(OpenJobPersistentTasksExecutor.class); + private final AutodetectProcessManager autodetectProcessManager; private final MlMemoryTracker memoryTracker; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DelayedDataDetectorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DelayedDataDetectorFactory.java index a9aeb398141d7..6cf1ffac1c1c2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DelayedDataDetectorFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DelayedDataDetectorFactory.java @@ -21,8 +21,8 @@ public class DelayedDataDetectorFactory { // There are eight 15min buckets in a two hour span, so matching that number as the fallback for very long buckets - private static final int FALLBACK_NUMBER_OF_BUCKETS_TO_SPAN = 8; - private static final TimeValue DEFAULT_CHECK_WINDOW = TimeValue.timeValueHours(2); + private static final int DEFAULT_NUMBER_OF_BUCKETS_TO_SPAN = 8; + private static final long DEFAULT_CHECK_WINDOW_MS = 7_200_000L; // 2 hours in Milliseconds /** * This will build the appropriate detector given the parameters. @@ -57,11 +57,7 @@ private static long validateAndCalculateWindowLength(TimeValue bucketSpan, TimeV return 0; } if (currentWindow == null) { // we should provide a good default as the user did not specify a window - if(bucketSpan.compareTo(DEFAULT_CHECK_WINDOW) >= 0) { - return FALLBACK_NUMBER_OF_BUCKETS_TO_SPAN * bucketSpan.millis(); - } else { - return DEFAULT_CHECK_WINDOW.millis(); - } + return Math.max(DEFAULT_CHECK_WINDOW_MS, DEFAULT_NUMBER_OF_BUCKETS_TO_SPAN * bucketSpan.millis()); } if (currentWindow.compareTo(bucketSpan) < 0) { throw new IllegalArgumentException( diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilder.java index dbc565fc50c12..5465a11d149ab 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilder.java @@ -68,7 +68,6 @@ public class AutodetectBuilder { private static final String MODEL_PLOT_CONFIG_ARG = "--modelplotconfig="; private static final String FIELD_CONFIG_ARG = "--fieldconfig="; static final String LATENCY_ARG = "--latency="; - static final String RESULT_FINALIZATION_WINDOW_ARG = "--resultFinalizationWindow="; static final String MULTIVARIATE_BY_FIELDS_ARG = "--multivariateByFields"; static final String PERSIST_INTERVAL_ARG = "--persistInterval="; static final String MAX_QUANTILE_INTERVAL_ARG = "--maxQuantileInterval="; @@ -202,15 +201,7 @@ List buildAutodetectCommand() { if (analysisConfig != null) { addIfNotNull(analysisConfig.getBucketSpan(), BUCKET_SPAN_ARG, command); addIfNotNull(analysisConfig.getLatency(), LATENCY_ARG, command); - addIfNotNull(analysisConfig.getSummaryCountFieldName(), - SUMMARY_COUNT_FIELD_ARG, command); - if 
(Boolean.TRUE.equals(analysisConfig.getOverlappingBuckets())) { - Long window = analysisConfig.getResultFinalizationWindow(); - if (window == null) { - window = AnalysisConfig.DEFAULT_RESULT_FINALIZATION_WINDOW; - } - command.add(RESULT_FINALIZATION_WINDOW_ARG + window); - } + addIfNotNull(analysisConfig.getSummaryCountFieldName(), SUMMARY_COUNT_FIELD_ARG, command); if (Boolean.TRUE.equals(analysisConfig.getMultivariateByFields())) { command.add(MULTIVARIATE_BY_FIELDS_ARG); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DelayedDataDetectorFactoryTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DelayedDataDetectorFactoryTests.java index 12cf97734c90d..3b1ca4c3071e1 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DelayedDataDetectorFactoryTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DelayedDataDetectorFactoryTests.java @@ -52,13 +52,13 @@ public void testBuilder() { assertEquals(Messages.getMessage( Messages.DATAFEED_CONFIG_DELAYED_DATA_CHECK_SPANS_TOO_MANY_BUCKETS, "12h", "2s"), e.getMessage()); - Job withBigBucketSpan = createJob(TimeValue.timeValueHours(3)); + Job withBigBucketSpan = createJob(TimeValue.timeValueHours(1)); datafeedConfig = createDatafeed(true, null); // Should not throw DelayedDataDetector delayedDataDetector = DelayedDataDetectorFactory.buildDetector(withBigBucketSpan, datafeedConfig, mock(Client.class)); - assertThat(delayedDataDetector.getWindow(), equalTo(TimeValue.timeValueHours(3).millis() * 8)); + assertThat(delayedDataDetector.getWindow(), equalTo(TimeValue.timeValueHours(1).millis() * 8)); datafeedConfig = createDatafeed(true, null); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilderTests.java index 9ef56d927f553..fedd3b320a39e 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectBuilderTests.java @@ -54,8 +54,8 @@ public void testBuildAutodetectCommand() { acBuilder.setBucketSpan(TimeValue.timeValueSeconds(120)); acBuilder.setLatency(TimeValue.timeValueSeconds(360)); acBuilder.setSummaryCountFieldName("summaryField"); - acBuilder.setOverlappingBuckets(true); acBuilder.setMultivariateByFields(true); + job.setAnalysisConfig(acBuilder); DataDescription.Builder dd = new DataDescription.Builder(); @@ -65,12 +65,11 @@ public void testBuildAutodetectCommand() { job.setDataDescription(dd); List command = autodetectBuilder(job.build()).buildAutodetectCommand(); - assertEquals(12, command.size()); + assertEquals(11, command.size()); assertTrue(command.contains(AutodetectBuilder.AUTODETECT_PATH)); assertTrue(command.contains(AutodetectBuilder.BUCKET_SPAN_ARG + "120")); assertTrue(command.contains(AutodetectBuilder.LATENCY_ARG + "360")); assertTrue(command.contains(AutodetectBuilder.SUMMARY_COUNT_FIELD_ARG + "summaryField")); - assertTrue(command.contains(AutodetectBuilder.RESULT_FINALIZATION_WINDOW_ARG + "2")); assertTrue(command.contains(AutodetectBuilder.MULTIVARIATE_BY_FIELDS_ARG)); assertTrue(command.contains(AutodetectBuilder.LENGTH_ENCODED_INPUT_ARG)); diff --git 
a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringService.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringService.java index a923b446b074e..d119b1a8dd2af 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringService.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringService.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.monitoring; import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.action.ActionListener; @@ -39,6 +40,8 @@ * service life cycles, the intended way to temporarily stop the publishing is using the start and stop methods. */ public class MonitoringService extends AbstractLifecycleComponent { + private static final Logger logger = LogManager.getLogger(MonitoringService.class); + /** * Log a deprecation warning if {@code value} is -1. diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/cleaner/CleanerService.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/cleaner/CleanerService.java index a0d56d3a6b5ab..f9f41974402b1 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/cleaner/CleanerService.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/cleaner/CleanerService.java @@ -5,6 +5,8 @@ */ package org.elasticsearch.xpack.monitoring.cleaner; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -26,6 +28,7 @@ * {@code CleanerService} takes care of deleting old monitoring indices. 
*/ public class CleanerService extends AbstractLifecycleComponent { + private static final Logger logger = LogManager.getLogger(CleanerService.class); private final XPackLicenseState licenseState; private final ThreadPool threadPool; diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/Exporters.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/Exporters.java index fab40bf0944f1..ac0d4dcbc065c 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/Exporters.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/exporter/Exporters.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.monitoring.exporter; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; @@ -36,6 +37,7 @@ import static java.util.Collections.emptyMap; public class Exporters extends AbstractLifecycleComponent implements Iterable { + private static final Logger logger = LogManager.getLogger(Exporters.class); private final Settings settings; private final Map factories; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java index ef1eeace73b31..f47798c26b5fc 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/filter/IpFilteringIntegrationTests.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.io.OutputStream; -import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.Socket; import java.nio.charset.StandardCharsets; @@ -66,7 +65,7 @@ public void testThatIpFilteringIsNotAppliedForDefaultTransport() throws Exceptio public void testThatIpFilteringIsAppliedForProfile() throws Exception { try (Socket socket = new Socket()){ - trySocketConnection(socket, new InetSocketAddress(InetAddress.getLoopbackAddress(), getProfilePort("client"))); + trySocketConnection(socket, getProfileAddress("client")); assertThat(socket.isClosed(), is(true)); } } @@ -83,9 +82,9 @@ private void trySocketConnection(Socket socket, InetSocketAddress address) throw } } - private static int getProfilePort(String profile) { + private static InetSocketAddress getProfileAddress(String profile) { TransportAddress transportAddress = randomFrom(internalCluster().getInstance(Transport.class).profileBoundAddresses().get(profile).boundAddresses()); - return transportAddress.address().getPort(); + return transportAddress.address(); } } diff --git a/x-pack/plugin/sql/qa/src/main/resources/docs.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/docs.csv-spec index ccb9498f3590a..b965bfbe082e7 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/docs.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/docs.csv-spec @@ -1510,3 +1510,24 @@ SELECT TRUNCATE(-345.153, 1) AS trimmed; // end::mathTruncateWithPositiveParameter ; + +coalesceReturnNonNull +// tag::coalesceReturnNonNull +SELECT COALESCE(null, 'elastic', 'search') AS "coalesce"; + + coalesce +--------------- +elastic +// end::coalesceReturnNonNull +; + + +coalesceReturnNull +// tag::coalesceReturnNull +SELECT COALESCE(null, null, 
null, null) AS "coalesce"; + + coalesce +--------------- +null +// end::coalesceReturnNull +; diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java index 49414367767c4..75e08509f393d 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypeConversionTests.java @@ -61,6 +61,7 @@ public void testConversionToLong() { assertEquals("cannot cast [0xff] to [Long]", e.getMessage()); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/35683") public void testConversionToDate() { DataType to = DataType.DATE; { diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/indices.freeze.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/indices.freeze.json new file mode 100644 index 0000000000000..a602f341e1524 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/indices.freeze.json @@ -0,0 +1,48 @@ +{ + "indices.freeze": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/frozen.html", + "methods": [ "POST" ], + "url": { + "path": "/{index}/_freeze", + "paths": [ + "/{index}/_freeze" + ], + "parts": { + "index": { + "type": "string", + "required": true, + "description": "The name of the index to freeze" + } + }, + "params": { + "timeout": { + "type" : "time", + "description" : "Explicit operation timeout" + }, + "master_timeout": { + "type" : "time", + "description" : "Specify timeout for connection to master" + }, + "ignore_unavailable": { + "type" : "boolean", + "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)" + }, + "allow_no_indices": { + "type" : "boolean", + "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)" + }, + "expand_wildcards": { + "type" : "enum", + "options" : ["open","closed","none","all"], + "default" : "closed", + "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both." + }, + "wait_for_active_shards": { + "type" : "string", + "description" : "Sets the number of active shards to wait for before the operation returns." 
+ } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/indices.unfreeze.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/indices.unfreeze.json new file mode 100644 index 0000000000000..b10e869a95758 --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/indices.unfreeze.json @@ -0,0 +1,46 @@ +{ + "indices.unfreeze": { + "documentation": "https://www.elastic.co/guide/en/elasticsearch/reference/current/frozen.html", + "methods": [ "POST" ], + "url": { + "path": "/{index}/_unfreeze", + "paths": [ "/{index}/_unfreeze" ], + "parts": { + "index": { + "type": "string", + "required": true, + "description": "The name of the index to unfreeze" + } + }, + "params": { + "timeout": { + "type" : "time", + "description" : "Explicit operation timeout" + }, + "master_timeout": { + "type" : "time", + "description" : "Specify timeout for connection to master" + }, + "ignore_unavailable": { + "type" : "boolean", + "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)" + }, + "allow_no_indices": { + "type" : "boolean", + "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)" + }, + "expand_wildcards": { + "type" : "enum", + "options" : ["open","closed","none","all"], + "default" : "closed", + "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both." + }, + "wait_for_active_shards": { + "type" : "string", + "description" : "Sets the number of active shards to wait for before the operation returns." + } + } + }, + "body": null + } +} diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/indices.freeze/10_basic.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/indices.freeze/10_basic.yml new file mode 100644 index 0000000000000..32daccbcef5cb --- /dev/null +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/indices.freeze/10_basic.yml @@ -0,0 +1,133 @@ +--- +"Basic": + +- skip: + version: " - 6.5.99" + reason: Frozen indices are not available before 6.6 + +- do: + index: + index: test + id: "1" + type: "_doc" + body: { "foo": "Hello: 1" } +- do: + index: + index: test + id: "2" + type: "_doc" + body: { "foo": "Hello: 2" } + +- do: + indices.freeze: + index: test + +- do: + search: + index: test + ignore_throttled: false + body: + query: + match: + foo: hello + +- match: {hits.total: 2} + +# unfreeze +- do: + indices.unfreeze: + index: test + +- do: + search: + index: _all + body: + query: + match: + foo: hello + +- match: {hits.total: 2} + +- do: + index: + index: test-01 + id: "1" + type: "_doc" + body: { "foo": "Hello: 01" } + + +- do: + indices.freeze: + index: test* + +- do: + search: + index: _all + ignore_throttled: false + body: + query: + match: + foo: hello + +- match: {hits.total: 3} + +- do: + search: + index: _all + body: + query: + match: + foo: hello + +- match: {hits.total: 0} + +--- +"Test index options": + +- skip: + version: " - 6.5.99" + reason: Frozen indices are not available before 6.6 + +- do: + index: + index: test + id: "1" + type: "_doc" + body: { "foo": "Hello: 1" } + +- do: + index: + index: test-close + id: "1" + type: "_doc" + body: { "foo": "Hello: 1" } + +- do: + indices.close: + index: test-close + +- do: + indices.freeze: + index: test*,not_available + ignore_unavailable: true + +- do: + search: + index: _all + body: + query: + match: + foo: hello + +- match: {hits.total: 
0} + +- do: + search: + index: _all + ignore_throttled: false + body: + query: + match: + foo: hello + +- match: {hits.total: 1}
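
For orientation only, here is a minimal sketch of how the freeze/unfreeze endpoints described by the two new rest-api-spec files above could be exercised from Java. This is not part of the change set: the PR adds the REST specs, YAML tests, and server-side actions, not a dedicated client method, so the sketch assumes the existing low-level RestClient. The host, port, and index name ("localhost", 9200, "my-index") are illustrative, and the ignore_throttled search parameter mirrors the usage in the YAML test above.

import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class FreezeUnfreezeSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Freeze the index via the new /{index}/_freeze endpoint.
            client.performRequest(new Request("POST", "/my-index/_freeze"));

            // By default a frozen (throttled) index is skipped by searches;
            // pass ignore_throttled=false to include it, as the YAML test does.
            Request search = new Request("GET", "/my-index/_search");
            search.addParameter("ignore_throttled", "false");
            Response response = client.performRequest(search);

            // Unfreeze via /{index}/_unfreeze to return the index to its normal searchable state.
            client.performRequest(new Request("POST", "/my-index/_unfreeze"));
        }
    }
}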