diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy
index 11f879a93bc71..99fc6c06f1b76 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy
@@ -104,7 +104,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
* format of the response is incompatible, i.e. it is not a JSON object.
*/
static shouldAddShardFailureCheck(String path) {
- return path.startsWith('_cat') == false && path.startsWith('_xpack/ml/datafeeds/') == false
+ return path.startsWith('_cat') == false && path.startsWith('_xpack/ml/datafeeds/') == false
}
/**
@@ -293,7 +293,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
}
void emitDo(String method, String pathAndQuery, String body,
- String catchPart, List warnings, boolean inSetup) {
+ String catchPart, List warnings, boolean inSetup, boolean skipShardFailures) {
def (String path, String query) = pathAndQuery.tokenize('?')
if (path == null) {
path = '' // Catch requests to the root...
@@ -345,7 +345,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
* section so we have to skip it there. We also omit the assertion
* from APIs that don't return a JSON object
*/
- if (false == inSetup && shouldAddShardFailureCheck(path)) {
+ if (false == inSetup && skipShardFailures == false && shouldAddShardFailureCheck(path)) {
current.println(" - is_false: _shards.failures")
}
}
@@ -393,7 +393,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
pathAndQuery = pathAndQuery.substring(1)
}
emitDo(method, pathAndQuery, body, catchPart, snippet.warnings,
- inSetup)
+ inSetup, snippet.skipShardsFailures)
}
}
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy
index fbc231aa764dc..83a6a05ec5df7 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/SnippetsTask.groovy
@@ -45,7 +45,7 @@ public class SnippetsTask extends DefaultTask {
private static final String WARNING = /warning:(.+)/
private static final String CAT = /(_cat)/
private static final String TEST_SYNTAX =
- /(?:$CATCH|$SUBSTITUTION|$SKIP|(continued)|$SETUP|$WARNING) ?/
+ /(?:$CATCH|$SUBSTITUTION|$SKIP|(continued)|$SETUP|$WARNING|(skip_shard_failures)) ?/
/**
* Action to take on each snippet. Called with a single parameter, an
@@ -233,6 +233,10 @@ public class SnippetsTask extends DefaultTask {
snippet.warnings.add(it.group(7))
return
}
+ if (it.group(8) != null) {
+ snippet.skipShardsFailures = true
+ return
+ }
throw new InvalidUserDataException(
"Invalid test marker: $line")
}
@@ -329,6 +333,7 @@ public class SnippetsTask extends DefaultTask {
String setup = null
boolean curl
List warnings = new ArrayList()
+ boolean skipShardsFailures = false
@Override
public String toString() {
@@ -359,6 +364,9 @@ public class SnippetsTask extends DefaultTask {
for (String warning in warnings) {
result += "[warning:$warning]"
}
+ if (skipShardsFailures) {
+ result += '[skip_shard_failures]'
+ }
}
if (testResponse) {
result += '// TESTRESPONSE'
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrClient.java
index b6c6866966725..373b94124d43e 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrClient.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrClient.java
@@ -27,6 +27,7 @@
import org.elasticsearch.client.ccr.FollowInfoResponse;
import org.elasticsearch.client.ccr.FollowStatsRequest;
import org.elasticsearch.client.ccr.FollowStatsResponse;
+import org.elasticsearch.client.ccr.ForgetFollowerRequest;
import org.elasticsearch.client.ccr.GetAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.GetAutoFollowPatternResponse;
import org.elasticsearch.client.ccr.PauseFollowRequest;
@@ -36,6 +37,7 @@
import org.elasticsearch.client.ccr.ResumeFollowRequest;
import org.elasticsearch.client.ccr.UnfollowRequest;
import org.elasticsearch.client.core.AcknowledgedResponse;
+import org.elasticsearch.client.core.BroadcastResponse;
import java.io.IOException;
import java.util.Collections;
@@ -233,6 +235,48 @@ public void unfollowAsync(UnfollowRequest request,
);
}
+ /**
+ * Instructs an index acting as a leader index to forget the specified follower index.
+ *
+ * See the docs for more details
+ * on the intended usage of this API.
+ *
+ * @param request the request
+ * @param options the request options (e.g., headers), use {@link RequestOptions#DEFAULT} if the defaults are acceptable.
+ * @return the response
+ * @throws IOException if an I/O exception occurs while executing this request
+ */
+ public BroadcastResponse forgetFollower(final ForgetFollowerRequest request, final RequestOptions options) throws IOException {
+ return restHighLevelClient.performRequestAndParseEntity(
+ request,
+ CcrRequestConverters::forgetFollower,
+ options,
+ BroadcastResponse::fromXContent,
+ Collections.emptySet());
+ }
+
+ /**
+ * Asynchronously instructs an index acting as a leader index to forget the specified follower index.
+ *
+ * See the docs for more details
+ * on the intended usage of this API.
+ *
+ * @param request the request
+ * @param options the request options (e.g., headers), use {@link RequestOptions#DEFAULT} if the defaults are acceptable.
+ */
+ public void forgetFollowerAsync(
+ final ForgetFollowerRequest request,
+ final RequestOptions options,
+ final ActionListener<BroadcastResponse> listener) {
+ restHighLevelClient.performRequestAsyncAndParseEntity(
+ request,
+ CcrRequestConverters::forgetFollower,
+ options,
+ BroadcastResponse::fromXContent,
+ listener,
+ Collections.emptySet());
+ }
+
/**
* Stores an auto follow pattern.
*
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrRequestConverters.java
index 744714af41a3a..940a1e3c5b31a 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrRequestConverters.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/CcrRequestConverters.java
@@ -28,6 +28,7 @@
import org.elasticsearch.client.ccr.DeleteAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.FollowInfoRequest;
import org.elasticsearch.client.ccr.FollowStatsRequest;
+import org.elasticsearch.client.ccr.ForgetFollowerRequest;
import org.elasticsearch.client.ccr.GetAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.PauseFollowRequest;
import org.elasticsearch.client.ccr.PutAutoFollowPatternRequest;
@@ -80,6 +81,17 @@ static Request unfollow(UnfollowRequest unfollowRequest) {
return new Request(HttpPost.METHOD_NAME, endpoint);
}
+ static Request forgetFollower(final ForgetFollowerRequest forgetFollowerRequest) throws IOException {
+ final String endpoint = new RequestConverters.EndpointBuilder()
+ .addPathPart(forgetFollowerRequest.leaderIndex())
+ .addPathPartAsIs("_ccr")
+ .addPathPartAsIs("forget_follower")
+ .build();
+ final Request request = new Request(HttpPost.METHOD_NAME, endpoint);
+ request.setEntity(createEntity(forgetFollowerRequest, REQUEST_BODY_CONTENT_TYPE));
+ return request;
+ }
+
static Request putAutoFollowPattern(PutAutoFollowPatternRequest putAutoFollowPatternRequest) throws IOException {
String endpoint = new RequestConverters.EndpointBuilder()
.addPathPartAsIs("_ccr", "auto_follow")
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/ForgetFollowerRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/ForgetFollowerRequest.java
new file mode 100644
index 0000000000000..3d20a6d934d9d
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ccr/ForgetFollowerRequest.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.ccr;
+
+import org.elasticsearch.client.Validatable;
+import org.elasticsearch.common.xcontent.ToXContentObject;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
+import java.util.Objects;
+
+/**
+ * Represents a forget follower request. Note that this is an expert API intended to be used only when unfollowing a follower index fails
+ * to remove the follower retention leases. Please be sure that you understand the purpose of this API before using it.
+ */
+public final class ForgetFollowerRequest implements ToXContentObject, Validatable {
+
+ private final String followerCluster;
+
+ private final String followerIndex;
+
+ private final String followerIndexUUID;
+
+ private final String leaderRemoteCluster;
+
+ private final String leaderIndex;
+
+ /**
+ * The name of the leader index.
+ *
+ * @return the name of the leader index
+ */
+ public String leaderIndex() {
+ return leaderIndex;
+ }
+
+ /**
+ * Construct a forget follower request.
+ *
+ * @param followerCluster the name of the cluster containing the follower index to forget
+ * @param followerIndex the name of the follower index
+ * @param followerIndexUUID the UUID of the follower index
+ * @param leaderRemoteCluster the alias of the remote cluster containing the leader index from the perspective of the follower index
+ * @param leaderIndex the name of the leader index
+ */
+ public ForgetFollowerRequest(
+ final String followerCluster,
+ final String followerIndex,
+ final String followerIndexUUID,
+ final String leaderRemoteCluster,
+ final String leaderIndex) {
+ this.followerCluster = Objects.requireNonNull(followerCluster);
+ this.followerIndex = Objects.requireNonNull(followerIndex);
+ this.followerIndexUUID = Objects.requireNonNull(followerIndexUUID);
+ this.leaderRemoteCluster = Objects.requireNonNull(leaderRemoteCluster);
+ this.leaderIndex = Objects.requireNonNull(leaderIndex);
+ }
+
+ @Override
+ public XContentBuilder toXContent(final XContentBuilder builder, final Params params) throws IOException {
+ builder.startObject();
+ {
+ builder.field("follower_cluster", followerCluster);
+ builder.field("follower_index", followerIndex);
+ builder.field("follower_index_uuid", followerIndexUUID);
+ builder.field("leader_remote_cluster", leaderRemoteCluster);
+ }
+ builder.endObject();
+ return builder;
+ }
+
+}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/core/BroadcastResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/BroadcastResponse.java
new file mode 100644
index 0000000000000..3665ba5bf5009
--- /dev/null
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/core/BroadcastResponse.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.core;
+
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
+import org.elasticsearch.common.xcontent.XContentParser;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Objects;
+
+/**
+ * Represents a response to a request that is broadcast to a collection of shards.
+ */
+public class BroadcastResponse {
+
+ private final Shards shards;
+
+ /**
+ * Represents the shard-level summary of the response execution.
+ *
+ * @return the shard-level response summary
+ */
+ public Shards shards() {
+ return shards;
+ }
+
+ BroadcastResponse(final Shards shards) {
+ this.shards = Objects.requireNonNull(shards);
+ }
+
+ private static final ParseField SHARDS_FIELD = new ParseField("_shards");
+
+ static final ConstructingObjectParser<BroadcastResponse, Void> PARSER = new ConstructingObjectParser<>(
+ "broadcast_response",
+ a -> new BroadcastResponse((Shards) a[0]));
+
+ static {
+ PARSER.declareObject(ConstructingObjectParser.constructorArg(), Shards.SHARDS_PARSER, SHARDS_FIELD);
+ }
+
+ /**
+ * Parses a broadcast response.
+ *
+ * @param parser the parser
+ * @return a broadcast response parsed from the specified parser
+ * @throws IOException if an I/O exception occurs parsing the response
+ */
+ public static BroadcastResponse fromXContent(final XContentParser parser) throws IOException {
+ return PARSER.parse(parser, null);
+ }
+
+ /**
+ * Represents the results from a collection of shards on which a request was executed.
+ */
+ public static class Shards {
+
+ private final int total;
+
+ /**
+ * The total number of shards on which a request was executed.
+ *
+ * @return the total number of shards
+ */
+ public int total() {
+ return total;
+ }
+
+ private final int successful;
+
+ /**
+ * The number of shards on which a request was executed successfully.
+ *
+ * @return the number of successful shards
+ */
+ public int successful() {
+ return successful;
+ }
+
+ private final int skipped;
+
+ /**
+ * The number of shards skipped by the request.
+ *
+ * @return the number of skipped shards
+ */
+ public int skipped() {
+ return skipped;
+ }
+
+ private final int failed;
+
+ /**
+ * The number of shards on which a request failed to execute.
+ *
+ * @return the number of failed shards
+ */
+ public int failed() {
+ return failed;
+ }
+
+ private final Collection<DefaultShardOperationFailedException> failures;
+
+ /**
+ * The failures corresponding to the shards on which a request failed to execute. Note that the number of failures might
+ * not match {@link #failed()} as some responses group together shard failures.
+ *
+ * @return the failures
+ */
+ public Collection<DefaultShardOperationFailedException> failures() {
+ return failures;
+ }
+
+ Shards(
+ final int total,
+ final int successful,
+ final int skipped,
+ final int failed,
+ final Collection<DefaultShardOperationFailedException> failures) {
+ this.total = total;
+ this.successful = successful;
+ this.skipped = skipped;
+ this.failed = failed;
+ this.failures = Collections.unmodifiableCollection(Objects.requireNonNull(failures));
+ }
+
+ private static final ParseField TOTAL_FIELD = new ParseField("total");
+ private static final ParseField SUCCESSFUL_FIELD = new ParseField("successful");
+ private static final ParseField SKIPPED_FIELD = new ParseField("skipped");
+ private static final ParseField FAILED_FIELD = new ParseField("failed");
+ private static final ParseField FAILURES_FIELD = new ParseField("failures");
+
+ @SuppressWarnings("unchecked")
+ static final ConstructingObjectParser<Shards, Void> SHARDS_PARSER = new ConstructingObjectParser<>(
+ "shards",
+ a -> new Shards(
+ (int) a[0], // total
+ (int) a[1], // successful
+ a[2] == null ? 0 : (int) a[2], // skipped
+ (int) a[3], // failed
+ a[4] == null ? Collections.emptyList() : (Collection<DefaultShardOperationFailedException>) a[4])); // failures
+
+ static {
+ SHARDS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), TOTAL_FIELD);
+ SHARDS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), SUCCESSFUL_FIELD);
+ SHARDS_PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), SKIPPED_FIELD);
+ SHARDS_PARSER.declareInt(ConstructingObjectParser.constructorArg(), FAILED_FIELD);
+ SHARDS_PARSER.declareObjectArray(
+ ConstructingObjectParser.optionalConstructorArg(),
+ DefaultShardOperationFailedException.PARSER, FAILURES_FIELD);
+ }
+
+ }
+
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CCRIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CCRIT.java
index 2566bb4912105..6c6b1ebe6a79e 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CCRIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CCRIT.java
@@ -35,6 +35,7 @@
import org.elasticsearch.client.ccr.FollowInfoResponse;
import org.elasticsearch.client.ccr.FollowStatsRequest;
import org.elasticsearch.client.ccr.FollowStatsResponse;
+import org.elasticsearch.client.ccr.ForgetFollowerRequest;
import org.elasticsearch.client.ccr.GetAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.GetAutoFollowPatternResponse;
import org.elasticsearch.client.ccr.IndicesFollowStats.ShardFollowStats;
@@ -45,20 +46,25 @@
import org.elasticsearch.client.ccr.ResumeFollowRequest;
import org.elasticsearch.client.ccr.UnfollowRequest;
import org.elasticsearch.client.core.AcknowledgedResponse;
+import org.elasticsearch.client.core.BroadcastResponse;
import org.elasticsearch.client.indices.CreateIndexRequest;
import org.elasticsearch.client.indices.CreateIndexResponse;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.rest.yaml.ObjectPath;
import org.junit.Before;
import java.io.IOException;
import java.util.Collections;
+import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
@@ -192,6 +198,61 @@ public void testIndexFollowing() throws Exception {
assertThat(unfollowResponse.isAcknowledged(), is(true));
}
+ public void testForgetFollower() throws IOException {
+ final CcrClient ccrClient = highLevelClient().ccr();
+
+ final CreateIndexRequest createIndexRequest = new CreateIndexRequest("leader");
+ final Map<String, String> settings = new HashMap<>(3);
+ final int numberOfShards = randomIntBetween(1, 2);
+ settings.put("index.number_of_replicas", "0");
+ settings.put("index.number_of_shards", Integer.toString(numberOfShards));
+ settings.put("index.soft_deletes.enabled", Boolean.TRUE.toString());
+ createIndexRequest.settings(settings);
+ final CreateIndexResponse response = highLevelClient().indices().create(createIndexRequest, RequestOptions.DEFAULT);
+ assertThat(response.isAcknowledged(), is(true));
+
+ final PutFollowRequest putFollowRequest = new PutFollowRequest("local_cluster", "leader", "follower", ActiveShardCount.ONE);
+ final PutFollowResponse putFollowResponse = execute(putFollowRequest, ccrClient::putFollow, ccrClient::putFollowAsync);
+ assertTrue(putFollowResponse.isFollowIndexCreated());
+ assertTrue(putFollowResponse.isFollowIndexShardsAcked());
+ assertTrue(putFollowResponse.isIndexFollowingStarted());
+
+ final String clusterName = highLevelClient().info(RequestOptions.DEFAULT).getClusterName().value();
+
+ final Request statsRequest = new Request("GET", "/follower/_stats");
+ final Response statsResponse = client().performRequest(statsRequest);
+ final ObjectPath statsObjectPath = ObjectPath.createFromResponse(statsResponse);
+ final String followerIndexUUID = statsObjectPath.evaluate("indices.follower.uuid");
+
+ final PauseFollowRequest pauseFollowRequest = new PauseFollowRequest("follower");
+ AcknowledgedResponse pauseFollowResponse = execute(pauseFollowRequest, ccrClient::pauseFollow, ccrClient::pauseFollowAsync);
+ assertTrue(pauseFollowResponse.isAcknowledged());
+
+ final ForgetFollowerRequest forgetFollowerRequest =
+ new ForgetFollowerRequest(clusterName, "follower", followerIndexUUID, "local_cluster", "leader");
+ final BroadcastResponse forgetFollowerResponse =
+ execute(forgetFollowerRequest, ccrClient::forgetFollower, ccrClient::forgetFollowerAsync);
+ assertThat(forgetFollowerResponse.shards().total(), equalTo(numberOfShards));
+ assertThat(forgetFollowerResponse.shards().successful(), equalTo(numberOfShards));
+ assertThat(forgetFollowerResponse.shards().skipped(), equalTo(0));
+ assertThat(forgetFollowerResponse.shards().failed(), equalTo(0));
+ assertThat(forgetFollowerResponse.shards().failures(), empty());
+
+ final Request retentionLeasesRequest = new Request("GET", "/leader/_stats");
+ retentionLeasesRequest.addParameter("level", "shards");
+ final Response retentionLeasesResponse = client().performRequest(retentionLeasesRequest);
+ final Map<?, ?> shardsStats = ObjectPath.createFromResponse(retentionLeasesResponse).evaluate("indices.leader.shards");
+ assertThat(shardsStats.keySet(), hasSize(numberOfShards));
+ for (int i = 0; i < numberOfShards; i++) {
+ final List<?> shardStats = (List<?>) shardsStats.get(Integer.toString(i));
+ assertThat(shardStats, hasSize(1));
+ final Map<?, ?> shardStatsAsMap = (Map<?, ?>) shardStats.get(0);
+ final Map<?, ?> retentionLeasesStats = (Map<?, ?>) shardStatsAsMap.get("retention_leases");
+ final List<?> leases = (List<?>) retentionLeasesStats.get("leases");
+ assertThat(leases, empty());
+ }
+ }
+
public void testAutoFollowing() throws Exception {
CcrClient ccrClient = highLevelClient().ccr();
PutAutoFollowPatternRequest putAutoFollowPatternRequest =
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CcrRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CcrRequestConvertersTests.java
index 8470194b65449..e440421d46b40 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CcrRequestConvertersTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CcrRequestConvertersTests.java
@@ -29,6 +29,7 @@
import org.elasticsearch.client.ccr.FollowConfig;
import org.elasticsearch.client.ccr.FollowInfoRequest;
import org.elasticsearch.client.ccr.FollowStatsRequest;
+import org.elasticsearch.client.ccr.ForgetFollowerRequest;
import org.elasticsearch.client.ccr.GetAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.PauseFollowRequest;
import org.elasticsearch.client.ccr.PutAutoFollowPatternRequest;
@@ -39,9 +40,11 @@
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.test.ESTestCase;
+import java.io.IOException;
import java.util.Arrays;
import java.util.Locale;
+import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.nullValue;
@@ -91,6 +94,20 @@ public void testUnfollow() {
assertThat(result.getEntity(), nullValue());
}
+ public void testForgetFollower() throws IOException {
+ final ForgetFollowerRequest request = new ForgetFollowerRequest(
+ randomAlphaOfLength(8),
+ randomAlphaOfLength(8),
+ randomAlphaOfLength(8),
+ randomAlphaOfLength(8),
+ randomAlphaOfLength(8));
+ final Request convertedRequest = CcrRequestConverters.forgetFollower(request);
+ assertThat(convertedRequest.getMethod(), equalTo(HttpPost.METHOD_NAME));
+ assertThat(convertedRequest.getEndpoint(), equalTo("/" + request.leaderIndex() + "/_ccr/forget_follower"));
+ assertThat(convertedRequest.getParameters().keySet(), empty());
+ RequestConvertersTests.assertToXContentBody(request, convertedRequest.getEntity());
+ }
+
public void testPutAutofollowPattern() throws Exception {
PutAutoFollowPatternRequest putAutoFollowPatternRequest = new PutAutoFollowPatternRequest(randomAlphaOfLength(4),
randomAlphaOfLength(4), Arrays.asList(generateRandomStringArray(4, 4, false)));
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/core/BroadcastResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/BroadcastResponseTests.java
new file mode 100644
index 0000000000000..96438725d4ef0
--- /dev/null
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/core/BroadcastResponseTests.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.client.core;
+
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.cluster.ClusterModule;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContent;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.seqno.RetentionLeaseNotFoundException;
+import org.elasticsearch.test.ESTestCase;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.isIn;
+
+public class BroadcastResponseTests extends ESTestCase {
+
+ public void testFromXContent() throws IOException {
+ final String index = randomAlphaOfLength(8);
+ final String id = randomAlphaOfLength(8);
+ final int total = randomIntBetween(1, 16);
+ final int successful = total - scaledRandomIntBetween(0, total);
+ final int failed = scaledRandomIntBetween(0, total - successful);
+ final List<DefaultShardOperationFailedException> failures = new ArrayList<>();
+ final Set<Integer> shardIds = new HashSet<>();
+ for (int i = 0; i < failed; i++) {
+ final DefaultShardOperationFailedException failure = new DefaultShardOperationFailedException(
+ index,
+ randomValueOtherThanMany(shardIds::contains, () -> randomIntBetween(0, total - 1)),
+ new RetentionLeaseNotFoundException(id));
+ failures.add(failure);
+ shardIds.add(failure.shardId());
+ }
+
+ final org.elasticsearch.action.support.broadcast.BroadcastResponse to =
+ new org.elasticsearch.action.support.broadcast.BroadcastResponse(total, successful, failed, failures);
+
+ final XContentType xContentType = randomFrom(XContentType.values());
+ final BytesReference bytes = toShuffledXContent(to, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean());
+
+ final XContent xContent = XContentFactory.xContent(xContentType);
+ final XContentParser parser = xContent.createParser(
+ new NamedXContentRegistry(ClusterModule.getNamedXWriteables()),
+ LoggingDeprecationHandler.INSTANCE,
+ bytes.streamInput());
+ final BroadcastResponse from = BroadcastResponse.fromXContent(parser);
+ assertThat(from.shards().total(), equalTo(total));
+ assertThat(from.shards().successful(), equalTo(successful));
+ assertThat(from.shards().skipped(), equalTo(0));
+ assertThat(from.shards().failed(), equalTo(failed));
+ assertThat(from.shards().failures(), hasSize(failed == 0 ? failed : 1)); // failures are grouped
+ if (failed > 0) {
+ final DefaultShardOperationFailedException groupedFailure = from.shards().failures().iterator().next();
+ assertThat(groupedFailure.index(), equalTo(index));
+ assertThat(groupedFailure.shardId(), isIn(shardIds));
+ assertThat(groupedFailure.reason(), containsString("reason=retention lease with ID [" + id + "] not found"));
+ }
+ }
+
+}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java
index 23cdd39787d32..baf8132096cb8 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java
@@ -40,6 +40,7 @@
import org.elasticsearch.client.ccr.FollowInfoResponse;
import org.elasticsearch.client.ccr.FollowStatsRequest;
import org.elasticsearch.client.ccr.FollowStatsResponse;
+import org.elasticsearch.client.ccr.ForgetFollowerRequest;
import org.elasticsearch.client.ccr.GetAutoFollowPatternRequest;
import org.elasticsearch.client.ccr.GetAutoFollowPatternResponse;
import org.elasticsearch.client.ccr.GetAutoFollowPatternResponse.Pattern;
@@ -51,15 +52,18 @@
import org.elasticsearch.client.ccr.ResumeFollowRequest;
import org.elasticsearch.client.ccr.UnfollowRequest;
import org.elasticsearch.client.core.AcknowledgedResponse;
+import org.elasticsearch.client.core.BroadcastResponse;
import org.elasticsearch.client.indices.CreateIndexRequest;
import org.elasticsearch.client.indices.CreateIndexResponse;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.rest.yaml.ObjectPath;
import org.junit.Before;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
+import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
@@ -395,6 +399,101 @@ public void onFailure(Exception e) {
assertTrue(latch.await(30L, TimeUnit.SECONDS));
}
+ public void testForgetFollower() throws InterruptedException, IOException {
+ final RestHighLevelClient client = highLevelClient();
+ final String leaderIndex = "leader";
+ {
+ // create leader index
+ final CreateIndexRequest createIndexRequest = new CreateIndexRequest(leaderIndex);
+ final Map<String, String> settings = new HashMap<>(2);
+ final int numberOfShards = randomIntBetween(1, 2);
+ settings.put("index.number_of_shards", Integer.toString(numberOfShards));
+ settings.put("index.soft_deletes.enabled", Boolean.TRUE.toString());
+ createIndexRequest.settings(settings);
+ final CreateIndexResponse response = client.indices().create(createIndexRequest, RequestOptions.DEFAULT);
+ assertThat(response.isAcknowledged(), is(true));
+ }
+ final String followerIndex = "follower";
+
+ final PutFollowRequest putFollowRequest = new PutFollowRequest("local", "leader", followerIndex, ActiveShardCount.ONE);
+ final PutFollowResponse putFollowResponse = client.ccr().putFollow(putFollowRequest, RequestOptions.DEFAULT);
+ assertTrue(putFollowResponse.isFollowIndexCreated());
+ assertTrue(putFollowResponse.isFollowIndexShardsAcked());
+ assertTrue(putFollowResponse.isIndexFollowingStarted());
+
+ final PauseFollowRequest pauseFollowRequest = new PauseFollowRequest("follower");
+ AcknowledgedResponse pauseFollowResponse = client.ccr().pauseFollow(pauseFollowRequest, RequestOptions.DEFAULT);
+ assertTrue(pauseFollowResponse.isAcknowledged());
+
+ final String followerCluster = highLevelClient().info(RequestOptions.DEFAULT).getClusterName().value();
+ final Request statsRequest = new Request("GET", "/follower/_stats");
+ final Response statsResponse = client().performRequest(statsRequest);
+ final ObjectPath statsObjectPath = ObjectPath.createFromResponse(statsResponse);
+ final String followerIndexUUID = statsObjectPath.evaluate("indices.follower.uuid");
+
+ final String leaderCluster = "local";
+
+ // tag::ccr-forget-follower-request
+ final ForgetFollowerRequest request = new ForgetFollowerRequest(
+ followerCluster, // <1>
+ followerIndex, // <2>
+ followerIndexUUID, // <3>
+ leaderCluster, // <4>
+ leaderIndex); // <5>
+ // end::ccr-forget-follower-request
+
+ // tag::ccr-forget-follower-execute
+ final BroadcastResponse response = client
+ .ccr()
+ .forgetFollower(request, RequestOptions.DEFAULT);
+ // end::ccr-forget-follower-execute
+
+ // tag::ccr-forget-follower-response
+ final BroadcastResponse.Shards shards = response.shards(); // <1>
+ final int total = shards.total(); // <2>
+ final int successful = shards.successful(); // <3>
+ final int skipped = shards.skipped(); // <4>
+ final int failed = shards.failed(); // <5>
+ shards.failures().forEach(failure -> {}); // <6>
+ // end::ccr-forget-follower-response
+
+ // tag::ccr-forget-follower-execute-listener
+ ActionListener<BroadcastResponse> listener =
+ new ActionListener<BroadcastResponse>() {
+
+ @Override
+ public void onResponse(final BroadcastResponse response) {
+ final BroadcastResponse.Shards shards = // <1>
+ response.shards();
+ final int total = shards.total();
+ final int successful = shards.successful();
+ final int skipped = shards.skipped();
+ final int failed = shards.failed();
+ shards.failures().forEach(failure -> {});
+ }
+
+ @Override
+ public void onFailure(final Exception e) {
+ // <2>
+ }
+
+ };
+ // end::ccr-forget-follower-execute-listener
+
+ // replace the empty listener by a blocking listener in test
+ final CountDownLatch latch = new CountDownLatch(1);
+ listener = new LatchedActionListener<>(listener, latch);
+
+ // tag::ccr-forget-follower-execute-async
+ client.ccr().forgetFollowerAsync(
+ request,
+ RequestOptions.DEFAULT,
+ listener); // <1>
+ // end::ccr-forget-follower-execute-async
+
+ assertTrue(latch.await(30L, TimeUnit.SECONDS));
+ }
+
public void testPutAutoFollowPattern() throws Exception {
RestHighLevelClient client = highLevelClient();
diff --git a/docs/java-rest/high-level/ccr/forget_follower.asciidoc b/docs/java-rest/high-level/ccr/forget_follower.asciidoc
new file mode 100644
index 0000000000000..bf1fde014b8e6
--- /dev/null
+++ b/docs/java-rest/high-level/ccr/forget_follower.asciidoc
@@ -0,0 +1,45 @@
+--
+:api: ccr-forget-follower
+:request: ForgetFollowerRequest
+:response: BroadcastResponse
+--
+
+[id="{upid}-{api}"]
+=== Forget Follower API
+
+[id="{upid}-{api}-request"]
+==== Request
+
+The Forget Follower API allows you to manually remove the follower retention
+leases from the leader. Note that these retention leases are automatically
+managed by the following index. This API exists only for cases when invoking
+the unfollow API on the follower index fails to remove the follower
+retention leases.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests-file}[{api}-request]
+--------------------------------------------------
+<1> The name of the cluster containing the follower index.
+<2> The name of the follower index.
+<3> The UUID of the follower index (can be obtained from index stats).
+<4> The alias of the remote cluster containing the leader index.
+<5> The name of the leader index.
+
+[id="{upid}-{api}-response"]
+==== Response
+
+The returned +{response}+ indicates whether the request was successful.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests-file}[{api}-response]
+--------------------------------------------------
+<1> The high-level shards summary.
+<2> The total number of shards the request was executed on.
+<3> The total number of shards the request was successful on.
+<4> The total number of shards the request was skipped on (should always be zero).
+<5> The total number of shards the request failed on.
+<6> The shard-level failures.
+
+include::../execution.asciidoc[]
diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc
index 61e7bf313a758..a5428845a8273 100644
--- a/docs/java-rest/high-level/supported-apis.asciidoc
+++ b/docs/java-rest/high-level/supported-apis.asciidoc
@@ -501,6 +501,7 @@ The Java High Level REST Client supports the following CCR APIs:
* <<{upid}-ccr-pause-follow>>
* <<{upid}-ccr-resume-follow>>
* <<{upid}-ccr-unfollow>>
+* <<{upid}-ccr-forget-follower>>
* <<{upid}-ccr-put-auto-follow-pattern>>
* <<{upid}-ccr-delete-auto-follow-pattern>>
* <<{upid}-ccr-get-auto-follow-pattern>>
@@ -512,6 +513,7 @@ include::ccr/put_follow.asciidoc[]
include::ccr/pause_follow.asciidoc[]
include::ccr/resume_follow.asciidoc[]
include::ccr/unfollow.asciidoc[]
+include::ccr/forget_follower.asciidoc[]
include::ccr/put_auto_follow_pattern.asciidoc[]
include::ccr/delete_auto_follow_pattern.asciidoc[]
include::ccr/get_auto_follow_pattern.asciidoc[]
diff --git a/docs/reference/aggregations/bucket/daterange-aggregation.asciidoc b/docs/reference/aggregations/bucket/daterange-aggregation.asciidoc
index 5cf9865501a00..6d5f0389cd770 100644
--- a/docs/reference/aggregations/bucket/daterange-aggregation.asciidoc
+++ b/docs/reference/aggregations/bucket/daterange-aggregation.asciidoc
@@ -209,7 +209,7 @@ POST /sales/_search?size=0
// CONSOLE
// TEST[setup:sales]
-<1> This date will be converted to `2016-02-15T00:00:00.000+01:00`.
+<1> This date will be converted to `2016-02-01T00:00:00.000+01:00`.
<2> `now/d` will be rounded to the beginning of the day in the CET time zone.
==== Keyed Response
diff --git a/docs/reference/analysis/tokenfilters/minhash-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/minhash-tokenfilter.asciidoc
index eb6a4d820ef1b..21c7387e0f7f5 100644
--- a/docs/reference/analysis/tokenfilters/minhash-tokenfilter.asciidoc
+++ b/docs/reference/analysis/tokenfilters/minhash-tokenfilter.asciidoc
@@ -1,7 +1,7 @@
[[analysis-minhash-tokenfilter]]
-=== Minhash Token Filter
+=== MinHash Token Filter
-A token filter of type `min_hash` hashes each token of the token stream and divides
+The `min_hash` token filter hashes each token of the token stream and divides
the resulting hashes into buckets, keeping the lowest-valued hashes per
bucket. It then returns these hashes as tokens.
@@ -20,3 +20,120 @@ The following are settings that can be set for a `min_hash` token filter.
bucket to its circular right. Only takes effect if hash_set_size is equal to one.
Defaults to `true` if bucket_count is greater than one, else `false`.
|=======================================================================
+
+Some points to consider while setting up a `min_hash` filter:
+
+* `min_hash` filter input tokens should typically be k-word shingles produced
+from a <<analysis-shingle-tokenfilter,shingle token filter>>. You should
+choose `k` large enough so that the probability of any given shingle
+occurring in a document is low. At the same time, as
+internally each shingle is hashed into a 128-bit hash, you should choose
+`k` small enough so that all possible
+different k-word shingles can be hashed to a 128-bit hash with
+minimal collision. 5-word shingles typically work well.
+
+* choosing the right settings for `hash_count`, `bucket_count` and
+`hash_set_size` needs some experimentation.
+** to improve the precision, you should increase `bucket_count` or
+`hash_set_size`. Higher values of `bucket_count` or `hash_set_size`
+will provide a higher guarantee that different tokens are
+indexed to different buckets.
+** to improve the recall,
+you should increase the `hash_count` parameter. For example,
+setting `hash_count=2` will make each token be hashed in
+two different ways, thus increasing the number of potential
+candidates for search.
+
+* the default settings make the `min_hash` filter produce
+512 `min_hash` tokens for each document, each of size 16 bytes.
+Thus, each document's size will increase by around 8Kb.
+
+* the `min_hash` filter hashes documents for Jaccard similarity. This means
+that it doesn't matter how many times a document contains a certain token,
+only whether it contains it or not.
+
+==== Theory
+The MinHash token filter allows you to hash documents for similarity search.
+Similarity search, or nearest neighbor search, is a complex problem.
+A naive solution requires an exhaustive pairwise comparison between a query
+document and every document in an index. This is a prohibitive operation
+if the index is large. A number of approximate nearest neighbor search
+solutions have been developed to make similarity search more practical and
+computationally feasible. One of these solutions involves hashing of documents.
+
+Documents are hashed in a way that similar documents are more likely
+to produce the same hash code and are put into the same hash bucket,
+while dissimilar documents are more likely to be hashed into
+different hash buckets. This type of hashing is known as
+locality sensitive hashing (LSH).
+
+Depending on what constitutes the similarity between documents,
+various LSH functions https://arxiv.org/abs/1408.2927[have been proposed].
+For https://en.wikipedia.org/wiki/Jaccard_index[Jaccard similarity], a popular
+LSH function is https://en.wikipedia.org/wiki/MinHash[MinHash].
+The general idea of the way MinHash produces a signature for a document
+is to apply a random permutation over the whole index vocabulary (a random
+numbering of the vocabulary) and record the minimum value of this permutation
+for the document (the minimum number for a vocabulary word that is present
+in the document). The permutations are run several times;
+combining the minimum values for all of them constitutes a
+signature for the document.
+
+In practice, instead of random permutations, a number of hash functions
+are chosen. A hash function calculates a hash code for each of a
+document's tokens and chooses the minimum hash code among them.
+The minimum hash codes from all hash functions are combined
+to form a signature for the document.
+
+
+==== Example of setting MinHash Token Filter in Elasticsearch
+Here is an example of setting up a `min_hash` filter:
+
+[source,js]
+--------------------------------------------------
+POST /index1
+{
+ "settings": {
+ "analysis": {
+ "filter": {
+ "my_shingle_filter": { <1>
+ "type": "shingle",
+ "min_shingle_size": 5,
+ "max_shingle_size": 5,
+ "output_unigrams": false
+ },
+ "my_minhash_filter": {
+ "type": "min_hash",
+ "hash_count": 1, <2>
+ "bucket_count": 512, <3>
+ "hash_set_size": 1, <4>
+ "with_rotation": true <5>
+ }
+ },
+ "analyzer": {
+ "my_analyzer": {
+ "tokenizer": "standard",
+ "filter": [
+ "my_shingle_filter",
+ "my_minhash_filter"
+ ]
+ }
+ }
+ }
+ },
+ "mappings": {
+ "properties": {
+ "text": {
+ "fingerprint": "text",
+ "analyzer": "my_analyzer"
+ }
+ }
+ }
+}
+--------------------------------------------------
+// NOTCONSOLE
+<1> setting a shingle filter with 5-word shingles
+<2> setting min_hash filter to hash with 1 hash
+<3> setting min_hash filter to hash tokens into 512 buckets
+<4> setting min_hash filter to keep only a single smallest hash in each bucket
+<5> setting min_hash filter to fill empty buckets with values from neighboring buckets
diff --git a/docs/reference/ccr/apis/ccr-apis.asciidoc b/docs/reference/ccr/apis/ccr-apis.asciidoc
index c7c5194790360..3a745f239867d 100644
--- a/docs/reference/ccr/apis/ccr-apis.asciidoc
+++ b/docs/reference/ccr/apis/ccr-apis.asciidoc
@@ -19,6 +19,7 @@ You can use the following APIs to perform {ccr} operations.
* <<ccr-post-pause-follow>>
* <<ccr-post-resume-follow>>
* <<ccr-post-unfollow>>
+* <<ccr-post-forget-follower>>
* <<ccr-get-follow-stats>>
* <<ccr-get-follow-info>>
@@ -38,6 +39,7 @@ include::follow/put-follow.asciidoc[]
include::follow/post-pause-follow.asciidoc[]
include::follow/post-resume-follow.asciidoc[]
include::follow/post-unfollow.asciidoc[]
+include::follow/post-forget-follower.asciidoc[]
include::follow/get-follow-stats.asciidoc[]
include::follow/get-follow-info.asciidoc[]
diff --git a/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc b/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc
new file mode 100644
index 0000000000000..5d5fb6a218449
--- /dev/null
+++ b/docs/reference/ccr/apis/follow/post-forget-follower.asciidoc
@@ -0,0 +1,152 @@
+[role="xpack"]
+[testenv="platinum"]
+[[ccr-post-forget-follower]]
+=== Forget Follower API
+++++
+Forget Follower
+++++
+
+Removes the follower retention leases from the leader.
+
+==== Description
+
+A following index takes out retention leases on its leader index. These
+retention leases are used to increase the likelihood that the shards of the
+leader index retain the history of operations that the shards of the following
+index need to execute replication. When a follower index is converted to a
+regular index via the <<ccr-post-unfollow,unfollow API>> (either via explicit
+execution of that API, or implicitly via {ilm}), these retention leases are
+removed. However, removing these retention leases can fail (e.g., if the remote
+cluster containing the leader index is unavailable). While these retention
+leases will eventually expire on their own, their extended existence can cause
+the leader index to hold more history than necessary, and prevent {ilm} from
+performing some operations on the leader index. This API exists to enable
+manually removing these retention leases when the unfollow API was unable to do
+so.
+
+NOTE: This API does not stop replication by a following index. If you use this
+API targeting a follower index that is still actively following, the following
+index will add back retention leases on the leader. The only purpose of this API
+is to handle the case of failure to remove the following retention leases after
+the <<ccr-post-unfollow,unfollow API>> is invoked.
+
+==== Request
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+PUT /follower_index/_ccr/follow?wait_for_active_shards=1
+{
+ "remote_cluster" : "remote_cluster",
+ "leader_index" : "leader_index"
+}
+--------------------------------------------------
+// CONSOLE
+// TESTSETUP
+// TEST[setup:remote_cluster_and_leader_index]
+
+[source,js]
+--------------------------------------------------
+POST /follower_index/_ccr/pause_follow
+--------------------------------------------------
+// CONSOLE
+// TEARDOWN
+
+//////////////////////////
+
+[source,js]
+--------------------------------------------------
+POST /<leader_index>/_ccr/forget_follower
+{
+ "follower_cluster" : "<follower_cluster>",
+ "follower_index" : "<follower_index>",
+ "follower_index_uuid" : "<follower_index_uuid>",
+ "leader_remote_cluster" : "<leader_remote_cluster>"
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[s/<leader_index>/leader_index/]
+// TEST[s/<follower_cluster>/follower_cluster/]
+// TEST[s/<follower_index>/follower_index/]
+// TEST[s/<follower_index_uuid>/follower_index_uuid/]
+// TEST[s/<leader_remote_cluster>/leader_remote_cluster/]
+// TEST[skip_shard_failures]
+
+[source,js]
+--------------------------------------------------
+{
+ "_shards" : {
+ "total" : 1,
+ "successful" : 1,
+ "failed" : 0,
+ "failures" : [ ]
+ }
+}
+--------------------------------------------------
+// TESTRESPONSE[s/"total" : 1/"total" : $body._shards.total/]
+// TESTRESPONSE[s/"successful" : 1/"successful" : $body._shards.successful/]
+// TESTRESPONSE[s/"failed" : 0/"failed" : $body._shards.failed/]
+// TESTRESPONSE[s/"failures" : \[ \]/"failures" : $body._shards.failures/]
+
+==== Path Parameters
+
+`leader_index` (required)::
+ (string) the name of the leader index
+
+==== Request Body
+`follower_cluster` (required)::
+ (string) the name of the cluster containing the follower index
+
+`follower_index` (required)::
+ (string) the name of the follower index
+
+`follower_index_uuid` (required)::
+ (string) the UUID of the follower index
+
+`leader_remote_cluster` (required)::
+ (string) the alias (from the perspective of the cluster containing the
+ follower index) of the <<modules-remote-clusters,remote cluster>> containing
+ the leader index
+
+==== Authorization
+
+If the {es} {security-features} are enabled, you must have `manage_leader_index`
+index privileges for the leader index. For more information, see
+{stack-ov}/security-privileges.html[Security privileges].
+
+==== Example
+
+This example removes the follower retention leases for `follower_index` from
+`leader_index`.
+
+[source,js]
+--------------------------------------------------
+POST /leader_index/_ccr/forget_follower
+{
+ "follower_cluster" : "",
+ "follower_index" : "follower_index",
+ "follower_index_uuid" : "",
+ "leader_remote_cluster" : ""
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[skip_shard_failures]
+
+The API returns the following result:
+
+[source,js]
+--------------------------------------------------
+{
+ "_shards" : {
+ "total" : 1,
+ "successful" : 1,
+ "failed" : 0,
+ "failures" : [ ]
+ }
+}
+--------------------------------------------------
+// TESTRESPONSE[s/"total" : 1/"total" : $body._shards.total/]
+// TESTRESPONSE[s/"successful" : 1/"successful" : $body._shards.successful/]
+// TESTRESPONSE[s/"failed" : 0/"failed" : $body._shards.failed/]
+// TESTRESPONSE[s/"failures" : \[ \]/"failures" : $body._shards.failures/]
diff --git a/docs/reference/migration/migrate_6_7.asciidoc b/docs/reference/migration/migrate_6_7.asciidoc
index 13b7d4b2cbc91..6e25291bc03d4 100644
--- a/docs/reference/migration/migrate_6_7.asciidoc
+++ b/docs/reference/migration/migrate_6_7.asciidoc
@@ -43,6 +43,24 @@ will result in an error. Additionally, there are two minor breaking changes here
- `plugin.mandatory` is no longer compatible with `ingest-geoip` nor
`ingest-user-agent`
+Elasticsearch 6.7.0 checks that there are no leftover geoip database files in
+the plugin configuration directory because the new module does not use them.
+Therefore, prior to upgrading to 6.7.0, remove the `ingest-geoip` plugin with
+the `--purge` option to also delete the old database files:
+
+[source,sh]
+------------------------------------------------------
+./bin/elasticsearch-plugin remove --purge ingest-geoip
+------------------------------------------------------
+
+Otherwise you will see the following error message upon startup (assuming
+`/etc/elasticsearch/ingest-geoip` as the plugin configuration directory):
+
+[source,text]
+---------------------------------------------------------------------------------------
+expected database [GeoLite2-ASN.mmdb] to not exist in [/etc/elasticsearch/ingest-geoip]
+---------------------------------------------------------------------------------------
+
[float]
[[breaking_67_settings_changes]]
diff --git a/docs/reference/release-notes/6.6.asciidoc b/docs/reference/release-notes/6.6.asciidoc
index a0f3c14399a87..0135a51022179 100644
--- a/docs/reference/release-notes/6.6.asciidoc
+++ b/docs/reference/release-notes/6.6.asciidoc
@@ -1,3 +1,57 @@
+[[release-notes-6.6.2]]
+== {es} version 6.6.2
+
+coming[6.6.2]
+
+Also see <<breaking-changes-6.6,Breaking changes in 6.6>>.
+
+[[breaking-6.6.2]]
+[float]
+=== Breaking changes
+
+Authentication::
+* Disable BWC mode in TokenService by default {pull}38881[#38881]
+
+
+
+[[enhancement-6.6.2]]
+[float]
+=== Enhancements
+
+SQL::
+* SQL: Enhance checks for inexact fields {pull}39427[#39427] (issue: {issue}38501[#38501])
+* SQL: add "validate.properties" property to JDBC's allowed list of settings {pull}39050[#39050] (issue: {issue}38068[#38068])
+
+
+
+[[bug-6.6.2]]
+[float]
+=== Bug fixes
+
+Authentication::
+* Use consistent view of realms for authentication {pull}38815[#38815] (issue: {issue}30301[#30301])
+
+Engine::
+* Bubble up exception when processing NoOp {pull}39338[#39338] (issue: {issue}38898[#38898])
+* Advance max_seq_no before add operation to Lucene {pull}38879[#38879] (issue: {issue}31629[#31629])
+
+Features/Watcher::
+* Only flush Watcher's bulk processor if Watcher is enabled {pull}38803[#38803] (issue: {issue}38798[#38798])
+
+Machine Learning::
+* [ML] Stop the ML memory tracker before closing node {pull}39111[#39111] (issue: {issue}37117[#37117])
+
+SQL::
+* SQL: Fix merging of incompatible multi-fields {pull}39560[#39560] (issue: {issue}39547[#39547])
+* SQL: ignore UNSUPPORTED fields for JDBC and ODBC modes in 'SYS COLUMNS' {pull}39518[#39518] (issue: {issue}39471[#39471])
+* SQL: Use underlying exact field for LIKE/RLIKE {pull}39443[#39443] (issue: {issue}39442[#39442])
+* SQL: enforce JDBC driver - ES server version parity {pull}38972[#38972] (issue: {issue}38775[#38775])
+* SQL: fall back to using the field name for column label {pull}38842[#38842] (issue: {issue}38831[#38831])
+* SQL: normalized keywords shouldn't be allowed for groupings and sorting [ISSUE] {pull}35203[#35203]
+
+Search::
+* Fix Fuzziness#asDistance(String) {pull}39643[#39643] (issue: {issue}39614[#39614])
+* Fix simple query string serialization conditional {pull}38960[#38960] (issues: {issue}21504[#21504], {issue}38889[#38889])
[[release-notes-6.6.1]]
== {es} version 6.6.1
diff --git a/docs/reference/release-notes/6.7.asciidoc b/docs/reference/release-notes/6.7.asciidoc
index 21bd757348673..46d686d73818c 100644
--- a/docs/reference/release-notes/6.7.asciidoc
+++ b/docs/reference/release-notes/6.7.asciidoc
@@ -376,6 +376,7 @@ Audit::
* Fix NPE in Logfile Audit Filter {pull}38120[#38120] (issue: {issue}38097[#38097])
Authentication::
+* Fix security index auto-create and state recovery race {pull}39582[#39582]
* Use consistent view of realms for authentication {pull}38815[#38815] (issue: {issue}30301[#30301])
* Enhance parsing of StatusCode in SAML Responses {pull}38628[#38628]
* Limit token expiry to 1 hour maximum {pull}38244[#38244]
@@ -403,6 +404,8 @@ CRUD::
* Fix Reindex from remote query logic {pull}36908[#36908]
Distributed::
+* Use cause to determine if node with primary is closing {pull}39723[#39723] (issue: {issue}39584[#39584])
+* Don’t ack if unable to remove failing replica {pull}39584[#39584] (issue: {issue}39467[#39467])
* Ignore waitForActiveShards when syncing leases {pull}39224[#39224] (issue: {issue}39089[#39089])
* Fix synchronization in LocalCheckpointTracker#contains {pull}38755[#38755] (issues: {issue}33871[#33871], {issue}38633[#38633])
* TransportVerifyShardBeforeCloseAction should force a flush {pull}38401[#38401] (issues: {issue}33888[#33888], {issue}37961[#37961])
@@ -417,6 +420,7 @@ Engine::
* Advance max_seq_no before add operation to Lucene {pull}38879[#38879] (issue: {issue}31629[#31629])
Features/Features::
+* Check for .watches that wasn't upgraded properly {pull}39609[#39609]
* Link to 7.0 documentation in deprecation checks {pull}39194[#39194]
* Handle Null in FetchSourceContext#fetchSource {pull}36839[#36839] (issue: {issue}29293[#29293])
@@ -454,12 +458,16 @@ Features/Monitoring::
* Allow built-in monitoring_user role to call GET _xpack API {pull}38060[#38060] (issue: {issue}37970[#37970])
Features/Watcher::
+* Use any index specified by .watches for Watcher {pull}39541[#39541] (issue: {issue}39478[#39478])
* Resolve concurrency with watcher trigger service {pull}39092[#39092] (issue: {issue}39087[#39087])
* Only flush Watcher's bulk processor if Watcher is enabled {pull}38803[#38803] (issue: {issue}38798[#38798])
Geo::
* Geo: Do not normalize the longitude with value -180 for Lucene shapes {pull}37299[#37299] (issue: {issue}37297[#37297])
+Highlighting::
+* Bug fix for AnnotatedTextHighlighter {pull}39525[#39525] (issue: {issue}39395[#39395])
+
Infra/Core::
* Correct name of basic_date_time_no_millis {pull}39367[#39367]
* Fix DateFormatters.parseMillis when no timezone is given {pull}39100[#39100] (issue: {issue}39067[#39067])
@@ -514,6 +522,7 @@ Recovery::
* RecoveryMonitor#lastSeenAccessTime should be volatile {pull}36781[#36781]
SQL::
+* SQL: Don't allow inexact fields for MIN/MAX {pull}39563[#39563] (issue: {issue}39427[#39427])
* SQL: Fix merging of incompatible multi-fields {pull}39560[#39560] (issue: {issue}39547[#39547])
* SQL: fix COUNT DISTINCT column name {pull}39537[#39537] (issue: {issue}39511[#39511])
* SQL: ignore UNSUPPORTED fields for JDBC and ODBC modes in 'SYS COLUMNS' {pull}39518[#39518] (issue: {issue}39471[#39471])
@@ -544,6 +553,7 @@ SQL::
* SQL: normalized keywords shouldn't be allowed for groupings and sorting [ISSUE] {pull}35203[#35203]
Search::
+* Fix Fuzziness#asDistance(String) {pull}39643[#39643] (issue: {issue}39614[#39614])
* Fix simple query string serialization conditional {pull}38960[#38960] (issues: {issue}21504[#21504], {issue}38889[#38889])
* Ensure that maxConcurrentShardRequests is never defaulted to 0 {pull}38734[#38734]
* Look up connection using the right cluster alias when releasing contexts {pull}38570[#38570]
@@ -570,6 +580,7 @@ Task Management::
* Un-assign persistent tasks as nodes exit the cluster {pull}37656[#37656]
ZenDiscovery::
+* Fixing the custom object serialization bug in diffable utils. {pull}39544[#39544]
* Always return metadata version if metadata is requested {pull}37674[#37674]
diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java
index a4a58d0c9946a..835003521f2d9 100644
--- a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java
+++ b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java
@@ -57,6 +57,7 @@
import org.elasticsearch.index.mapper.TextFieldMapper;
import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedText.AnnotationToken;
import org.elasticsearch.index.query.QueryShardContext;
+import org.elasticsearch.search.fetch.FetchSubPhase.HitContext;
import java.io.IOException;
import java.io.Reader;
@@ -317,46 +318,13 @@ public AnnotationToken getAnnotation(int index) {
// When asked to tokenize plain-text versions by the highlighter it tokenizes the
// original markup form in order to inject annotations.
public static final class AnnotatedHighlighterAnalyzer extends AnalyzerWrapper {
- private Analyzer delegate;
- private AnnotatedText[] annotations;
- public AnnotatedHighlighterAnalyzer(Analyzer delegate){
+ private final Analyzer delegate;
+ private final HitContext hitContext;
+ public AnnotatedHighlighterAnalyzer(Analyzer delegate, HitContext hitContext){
super(delegate.getReuseStrategy());
this.delegate = delegate;
+ this.hitContext = hitContext;
}
-
- public void init(String[] markedUpFieldValues) {
- this.annotations = new AnnotatedText[markedUpFieldValues.length];
- for (int i = 0; i < markedUpFieldValues.length; i++) {
- annotations[i] = AnnotatedText.parse(markedUpFieldValues[i]);
- }
- }
-
- public String [] getPlainTextValuesForHighlighter(){
- String [] result = new String[annotations.length];
- for (int i = 0; i < annotations.length; i++) {
- result[i] = annotations[i].textMinusMarkup;
- }
- return result;
- }
-
- public AnnotationToken[] getIntersectingAnnotations(int start, int end) {
- List<AnnotationToken> intersectingAnnotations = new ArrayList<>();
- int fieldValueOffset =0;
- for (AnnotatedText fieldValueAnnotations : this.annotations) {
- //This is called from a highlighter where all of the field values are concatenated
- // so each annotation offset will need to be adjusted so that it takes into account
- // the previous values AND the MULTIVAL delimiter
- for (AnnotationToken token : fieldValueAnnotations.annotations) {
- if(token.intersects(start - fieldValueOffset , end - fieldValueOffset)) {
- intersectingAnnotations.add(new AnnotationToken(token.offset + fieldValueOffset,
- token.endOffset + fieldValueOffset, token.value));
- }
- }
- //add 1 for the fieldvalue separator character
- fieldValueOffset +=fieldValueAnnotations.textMinusMarkup.length() +1;
- }
- return intersectingAnnotations.toArray(new AnnotationToken[intersectingAnnotations.size()]);
- }
@Override
public Analyzer getWrappedAnalyzer(String fieldName) {
@@ -370,7 +338,8 @@ protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComp
return components;
}
AnnotationsInjector injector = new AnnotationsInjector(components.getTokenStream());
- return new AnnotatedHighlighterTokenStreamComponents(components.getTokenizer(), injector, this.annotations);
+ AnnotatedText[] annotations = (AnnotatedText[]) hitContext.cache().get(AnnotatedText.class.getName());
+ return new AnnotatedHighlighterTokenStreamComponents(components.getTokenizer(), injector, annotations);
}
}
private static final class AnnotatedHighlighterTokenStreamComponents extends TokenStreamComponents{
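
The change above removes the mutable annotations field from AnnotatedHighlighterAnalyzer, which was state shared across hits, and instead reads the per-hit parse results back out of hitContext.cache(). A minimal, self-contained sketch of that handoff pattern follows; HitContext and HighlightAnalyzer here are plain-Java stand-ins, not the Elasticsearch or Lucene types:

import java.util.HashMap;
import java.util.Map;

// Simplified model of the fix, not the Elasticsearch API: per-hit parse
// results travel in a cache map handed to the analyzer at construction time,
// so the analyzer itself holds no mutable state shared across hits.
public class PerHitCacheSketch {

    // Stand-in for FetchSubPhase.HitContext, which exposes a map-like cache.
    static final class HitContext {
        private final Map<String, Object> cache = new HashMap<>();
        Map<String, Object> cache() { return cache; }
    }

    // Stand-in for AnnotatedHighlighterAnalyzer: final fields only.
    static final class HighlightAnalyzer {
        private final HitContext hitContext;

        HighlightAnalyzer(HitContext hitContext) {
            this.hitContext = hitContext;
        }

        String[] annotationsForThisHit() {
            // Read back whatever the fetch phase cached for this hit.
            Object cached = hitContext.cache().get("annotations");
            return cached == null ? new String[0] : (String[]) cached;
        }
    }

    public static void main(String[] args) {
        HitContext hit = new HitContext();
        // The fetch phase parses the stored field values once per hit...
        hit.cache().put("annotations", new String[] { "value one", "value two" });
        // ...and the analyzer built for that hit reads them back later.
        HighlightAnalyzer analyzer = new HighlightAnalyzer(hit);
        for (String value : analyzer.annotationsForThisHit()) {
            System.out.println(value);
        }
    }
}
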
diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AnnotatedPassageFormatter.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AnnotatedPassageFormatter.java
index ad1acc85031dd..7d360dd0b9bac 100644
--- a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AnnotatedPassageFormatter.java
+++ b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AnnotatedPassageFormatter.java
@@ -23,7 +23,7 @@
import org.apache.lucene.search.uhighlight.Passage;
import org.apache.lucene.search.uhighlight.PassageFormatter;
import org.apache.lucene.search.uhighlight.Snippet;
-import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedHighlighterAnalyzer;
+import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedText;
import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedText.AnnotationToken;
import java.io.UnsupportedEncodingException;
@@ -42,11 +42,11 @@ public class AnnotatedPassageFormatter extends PassageFormatter {
public static final String SEARCH_HIT_TYPE = "_hit_term";
private final Encoder encoder;
- private AnnotatedHighlighterAnalyzer annotatedHighlighterAnalyzer;
+ AnnotatedText[] annotations;
- public AnnotatedPassageFormatter(AnnotatedHighlighterAnalyzer annotatedHighlighterAnalyzer, Encoder encoder) {
- this.annotatedHighlighterAnalyzer = annotatedHighlighterAnalyzer;
+ public AnnotatedPassageFormatter(AnnotatedText[] annotations, Encoder encoder) {
this.encoder = encoder;
+ this.annotations = annotations;
}
static class MarkupPassage {
@@ -158,7 +158,7 @@ public Snippet[] format(Passage[] passages, String content) {
int pos;
int j = 0;
for (Passage passage : passages) {
- AnnotationToken [] annotations = annotatedHighlighterAnalyzer.getIntersectingAnnotations(passage.getStartOffset(),
+ AnnotationToken [] annotations = getIntersectingAnnotations(passage.getStartOffset(),
passage.getEndOffset());
MarkupPassage mergedMarkup = mergeAnnotations(annotations, passage);
@@ -194,6 +194,27 @@ public Snippet[] format(Passage[] passages, String content) {
}
return snippets;
}
+
+ public AnnotationToken[] getIntersectingAnnotations(int start, int end) {
+ List<AnnotationToken> intersectingAnnotations = new ArrayList<>();
+ int fieldValueOffset = 0;
+ for (AnnotatedText fieldValueAnnotations : this.annotations) {
+ // This is called from a highlighter where all of the field values are concatenated,
+ // so each annotation offset will need to be adjusted so that it takes into account
+ // the previous values AND the MULTIVAL delimiter
+ for (int i = 0; i < fieldValueAnnotations.numAnnotations(); i++) {
+ AnnotationToken token = fieldValueAnnotations.getAnnotation(i);
+ if (token.intersects(start - fieldValueOffset, end - fieldValueOffset)) {
+ intersectingAnnotations.add(
+     new AnnotationToken(token.offset + fieldValueOffset,
+         token.endOffset + fieldValueOffset, token.value));
+ }
+ }
+ // add 1 for the field value separator character
+ fieldValueOffset += fieldValueAnnotations.textMinusMarkup.length() + 1;
+ }
+ return intersectingAnnotations.toArray(new AnnotationToken[intersectingAnnotations.size()]);
+ }
private void append(StringBuilder dest, String content, int start, int end) {
dest.append(encoder.encodeText(content.substring(start, end)));
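
The offset arithmetic in getIntersectingAnnotations is easiest to check with a worked example: annotation offsets are recorded per field value, but passages are scored against the concatenated values, so each annotation must be shifted by the running length of the preceding values plus one separator character each. A standalone sketch under simplified assumptions (Annotation here stands in for AnnotationToken):

// Stand-in types only; AnnotationToken offsets in the real code are per field
// value, while passage offsets are in the concatenated-values string.
public class OffsetAdjustmentSketch {

    record Annotation(int start, int end, String value) {
        boolean intersects(int s, int e) {
            return s <= end && e >= start;
        }
    }

    public static void main(String[] args) {
        String[] values = { "first value", "the Paris trip" };
        // "Paris" is annotated at [4, 9) within the second value.
        Annotation local = new Annotation(4, 9, "Paris");

        // Concatenated form: "first value" + separator + "the Paris trip",
        // so the second value starts at 11 + 1 = 12 (+1 for the separator).
        int fieldValueOffset = values[0].length() + 1;
        Annotation global = new Annotation(
            local.start() + fieldValueOffset,
            local.end() + fieldValueOffset,
            local.value());

        // A passage covering [12, 26) of the concatenated string intersects it.
        System.out.println(global.start() + ".." + global.end()); // 16..21
        System.out.println(global.intersects(12, 26));            // true
    }
}
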
diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AnnotatedTextHighlighter.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AnnotatedTextHighlighter.java
index d93316c78921a..2ba7838b90950 100644
--- a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AnnotatedTextHighlighter.java
+++ b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AnnotatedTextHighlighter.java
@@ -25,24 +25,22 @@
import org.elasticsearch.index.mapper.DocumentMapper;
import org.elasticsearch.index.mapper.MappedFieldType;
import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedHighlighterAnalyzer;
+import org.elasticsearch.index.mapper.annotatedtext.AnnotatedTextFieldMapper.AnnotatedText;
import org.elasticsearch.search.fetch.FetchSubPhase.HitContext;
import org.elasticsearch.search.fetch.subphase.highlight.SearchContextHighlight.Field;
import org.elasticsearch.search.internal.SearchContext;
import java.io.IOException;
-import java.util.Arrays;
+import java.util.ArrayList;
import java.util.List;
public class AnnotatedTextHighlighter extends UnifiedHighlighter {
public static final String NAME = "annotated";
-
- AnnotatedHighlighterAnalyzer annotatedHighlighterAnalyzer = null;
@Override
- protected Analyzer getAnalyzer(DocumentMapper docMapper, MappedFieldType type) {
- annotatedHighlighterAnalyzer = new AnnotatedHighlighterAnalyzer(super.getAnalyzer(docMapper, type));
- return annotatedHighlighterAnalyzer;
+ protected Analyzer getAnalyzer(DocumentMapper docMapper, MappedFieldType type, HitContext hitContext) {
+ return new AnnotatedHighlighterAnalyzer(super.getAnalyzer(docMapper, type, hitContext), hitContext);
}
// Convert the marked-up values held on-disk to plain-text versions for highlighting
@@ -51,14 +49,26 @@ protected List
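
The remainder of this hunk is not shown above. As a hedged sketch only: the fetch side presumably parses each stored value once per hit, caches the resulting AnnotatedText[] under AnnotatedText.class.getName() for the analyzer and passage formatter to share, and returns the plain-text versions for highlighting. Modeled below with plain Java types; parseToPlainText is a hypothetical stand-in for AnnotatedText.parse:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class LoadFieldValuesSketch {

    // Hypothetical stand-in for AnnotatedText.parse(): strips [text](value)
    // markup down to the plain text (the real parser also keeps the tokens).
    static String parseToPlainText(String markedUp) {
        return markedUp.replaceAll("\\[([^\\]]+)\\]\\([^)]*\\)", "$1");
    }

    // Parse once per hit, cache the results, return plain text to highlight.
    static List<String> loadFieldValues(List<String> stored, Map<String, Object> hitCache) {
        List<String> plain = new ArrayList<>(stored.size());
        String[] parsed = new String[stored.size()];
        for (int i = 0; i < stored.size(); i++) {
            parsed[i] = parseToPlainText(stored.get(i));
            plain.add(parsed[i]);
        }
        hitCache.put("AnnotatedText", parsed); // shared with analyzer/formatter
        return plain;
    }

    public static void main(String[] args) {
        Map<String, Object> hitCache = new HashMap<>();
        List<String> plain = loadFieldValues(
            List.of("visited [Paris](Paris) last year"), hitCache);
        System.out.println(plain.get(0)); // visited Paris last year
    }
}
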