diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index 9cc5bb82552ab..85fe712fd8d85 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -744,6 +744,7 @@ class BuildPlugin implements Plugin { additionalTest.testClassesDir = test.testClassesDir additionalTest.configure(commonTestConfig(project)) additionalTest.configure(config) + additionalTest.dependsOn(project.tasks.testClasses) test.dependsOn(additionalTest) }); return test diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy index 15a4f21b17543..adacc1863c595 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/doc/RestTestsFromSnippetsTask.groovy @@ -225,6 +225,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask { * warning every time. */ current.println(" - skip:") current.println(" features: ") + current.println(" - default_shards") current.println(" - stash_in_key") current.println(" - stash_in_path") current.println(" - stash_path_replace") diff --git a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/rest/RestClientBenchmark.java b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/rest/RestClientBenchmark.java index 9210526e7c81c..d32c37dc2c44f 100644 --- a/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/rest/RestClientBenchmark.java +++ b/client/benchmark/src/main/java/org/elasticsearch/client/benchmark/rest/RestClientBenchmark.java @@ -18,27 +18,19 @@ */ package org.elasticsearch.client.benchmark.rest; -import org.apache.http.HttpEntity; import org.apache.http.HttpHeaders; import org.apache.http.HttpHost; import org.apache.http.HttpStatus; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.conn.ConnectionKeepAliveStrategy; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; import org.apache.http.message.BasicHeader; -import org.apache.http.nio.entity.NStringEntity; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; -import org.elasticsearch.client.RestClientBuilder; import org.elasticsearch.client.benchmark.AbstractBenchmark; import org.elasticsearch.client.benchmark.ops.bulk.BulkRequestExecutor; import org.elasticsearch.client.benchmark.ops.search.SearchRequestExecutor; import java.io.IOException; -import java.nio.charset.StandardCharsets; import java.util.Collections; import java.util.List; import java.util.Locale; @@ -86,9 +78,10 @@ public boolean bulkIndex(List bulkData) { bulkRequestBody.append(bulkItem); bulkRequestBody.append("\n"); } - HttpEntity entity = new NStringEntity(bulkRequestBody.toString(), ContentType.APPLICATION_JSON); + Request request = new Request("POST", "/geonames/type/_noop_bulk"); + request.setJsonEntity(bulkRequestBody.toString()); try { - Response response = client.performRequest("POST", "/geonames/type/_noop_bulk", Collections.emptyMap(), entity); + Response response = client.performRequest(request); return 
response.getStatusLine().getStatusCode() == HttpStatus.SC_OK; } catch (Exception e) { throw new ElasticsearchException(e); @@ -107,9 +100,10 @@ private RestSearchRequestExecutor(RestClient client, String indexName) { @Override public boolean search(String source) { - HttpEntity searchBody = new NStringEntity(source, StandardCharsets.UTF_8); + Request request = new Request("GET", endpoint); + request.setJsonEntity(source); try { - Response response = client.performRequest("GET", endpoint, Collections.emptyMap(), searchBody); + Response response = client.performRequest(request); return response.getStatusLine().getStatusCode() == HttpStatus.SC_OK; } catch (IOException e) { throw new ElasticsearchException(e); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java index 7f59fcc831213..9782b1016b421 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorIT.java @@ -194,18 +194,16 @@ public void testBulkProcessorWaitOnClose() throws Exception { } public void testBulkProcessorConcurrentRequestsReadOnlyIndex() throws Exception { - - String createIndexBody = "{\n" + + Request request = new Request("PUT", "/test-ro"); + request.setJsonEntity("{\n" + " \"settings\" : {\n" + " \"index\" : {\n" + " \"blocks.write\" : true\n" + " }\n" + " }\n" + " \n" + - "}"; - - NStringEntity entity = new NStringEntity(createIndexBody, ContentType.APPLICATION_JSON); - Response response = client().performRequest("PUT", "/test-ro", Collections.emptyMap(), entity); + "}"); + Response response = client().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); int bulkActions = randomIntBetween(10, 100); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java index ee820871dbb3d..f384e5706b09a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java @@ -19,9 +19,6 @@ package org.elasticsearch.client; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.action.DocWriteRequest; @@ -39,6 +36,7 @@ import org.elasticsearch.action.get.MultiGetResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; +import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.common.Strings; @@ -147,11 +145,10 @@ public void testExists() throws IOException { GetRequest getRequest = new GetRequest("index", "type", "id"); assertFalse(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync)); } - String document = "{\"field1\":\"value1\",\"field2\":\"value2\"}"; - StringEntity stringEntity = new StringEntity(document, ContentType.APPLICATION_JSON); - Response response = client().performRequest(HttpPut.METHOD_NAME, "/index/type/id", Collections.singletonMap("refresh", "wait_for"), - stringEntity); - assertEquals(201, 
response.getStatusLine().getStatusCode()); + IndexRequest index = new IndexRequest("index", "type", "id"); + index.source("{\"field1\":\"value1\",\"field2\":\"value2\"}", XContentType.JSON); + index.setRefreshPolicy(RefreshPolicy.IMMEDIATE); + highLevelClient().index(index); { GetRequest getRequest = new GetRequest("index", "type", "id"); assertTrue(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync)); @@ -175,12 +172,11 @@ public void testGet() throws IOException { assertEquals("Elasticsearch exception [type=index_not_found_exception, reason=no such index]", exception.getMessage()); assertEquals("index", exception.getMetadata("es.index").get(0)); } - + IndexRequest index = new IndexRequest("index", "type", "id"); String document = "{\"field1\":\"value1\",\"field2\":\"value2\"}"; - StringEntity stringEntity = new StringEntity(document, ContentType.APPLICATION_JSON); - Response response = client().performRequest(HttpPut.METHOD_NAME, "/index/type/id", Collections.singletonMap("refresh", "wait_for"), - stringEntity); - assertEquals(201, response.getStatusLine().getStatusCode()); + index.source(document, XContentType.JSON); + index.setRefreshPolicy(RefreshPolicy.IMMEDIATE); + highLevelClient().index(index); { GetRequest getRequest = new GetRequest("index", "type", "id").version(2); ElasticsearchException exception = expectThrows(ElasticsearchException.class, @@ -271,18 +267,15 @@ public void testMultiGet() throws IOException { assertEquals("Elasticsearch exception [type=index_not_found_exception, reason=no such index]", response.getResponses()[1].getFailure().getFailure().getMessage()); } - - String document = "{\"field\":\"value1\"}"; - StringEntity stringEntity = new StringEntity(document, ContentType.APPLICATION_JSON); - Response r = client().performRequest(HttpPut.METHOD_NAME, "/index/type/id1", Collections.singletonMap("refresh", "true"), - stringEntity); - assertEquals(201, r.getStatusLine().getStatusCode()); - - document = "{\"field\":\"value2\"}"; - stringEntity = new StringEntity(document, ContentType.APPLICATION_JSON); - r = client().performRequest(HttpPut.METHOD_NAME, "/index/type/id2", Collections.singletonMap("refresh", "true"), stringEntity); - assertEquals(201, r.getStatusLine().getStatusCode()); - + BulkRequest bulk = new BulkRequest(); + bulk.setRefreshPolicy(RefreshPolicy.IMMEDIATE); + IndexRequest index = new IndexRequest("index", "type", "id1"); + index.source("{\"field\":\"value1\"}", XContentType.JSON); + bulk.add(index); + index = new IndexRequest("index", "type", "id2"); + index.source("{\"field\":\"value2\"}", XContentType.JSON); + bulk.add(index); + highLevelClient().bulk(bulk); { MultiGetRequest multiGetRequest = new MultiGetRequest(); multiGetRequest.add("index", "type", "id1"); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java index 9828041332b32..549b4ce0a85c5 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java @@ -312,14 +312,14 @@ public void testSearchWithMatrixStats() throws IOException { MatrixStats matrixStats = searchResponse.getAggregations().get("agg1"); assertEquals(5, matrixStats.getFieldCount("num")); assertEquals(56d, matrixStats.getMean("num"), 0d); - assertEquals(1830d, matrixStats.getVariance("num"), 0d); - assertEquals(0.09340198804973046, matrixStats.getSkewness("num"), 0d); + 
assertEquals(1830.0000000000002, matrixStats.getVariance("num"), 0d); + assertEquals(0.09340198804973039, matrixStats.getSkewness("num"), 0d); assertEquals(1.2741646510794589, matrixStats.getKurtosis("num"), 0d); assertEquals(5, matrixStats.getFieldCount("num2")); assertEquals(29d, matrixStats.getMean("num2"), 0d); assertEquals(330d, matrixStats.getVariance("num2"), 0d); assertEquals(-0.13568039346585542, matrixStats.getSkewness("num2"), 1.0e-16); - assertEquals(1.3517561983471074, matrixStats.getKurtosis("num2"), 0d); + assertEquals(1.3517561983471071, matrixStats.getKurtosis("num2"), 0d); assertEquals(-767.5, matrixStats.getCovariance("num", "num2"), 0d); assertEquals(-0.9876336291667923, matrixStats.getCorrelation("num", "num2"), 0d); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java index 9b0b1ab83a460..6641aa2fc7d25 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java @@ -19,8 +19,6 @@ package org.elasticsearch.client.documentation; -import org.apache.http.HttpEntity; -import org.apache.http.client.methods.HttpPost; import org.apache.http.entity.ContentType; import org.apache.http.nio.entity.NStringEntity; import org.elasticsearch.ElasticsearchException; @@ -49,6 +47,7 @@ import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.client.ESRestHighLevelClientTestCase; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.common.Strings; @@ -58,6 +57,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.get.GetResult; import org.elasticsearch.rest.RestStatus; @@ -271,16 +271,15 @@ public void testUpdate() throws Exception { IndexResponse indexResponse = client.index(indexRequest); assertSame(indexResponse.status(), RestStatus.CREATED); - XContentType xContentType = XContentType.JSON; - String script = Strings.toString(XContentBuilder.builder(xContentType.xContent()) + Request request = new Request("POST", "/_scripts/increment-field"); + request.setJsonEntity(Strings.toString(JsonXContent.contentBuilder() .startObject() .startObject("script") .field("lang", "painless") .field("code", "ctx._source.field += params.count") .endObject() - .endObject()); - HttpEntity body = new NStringEntity(script, ContentType.create(xContentType.mediaType())); - Response response = client().performRequest(HttpPost.METHOD_NAME, "/_scripts/increment-field", emptyMap(), body); + .endObject())); + Response response = client().performRequest(request); assertEquals(response.getStatusLine().getStatusCode(), RestStatus.OK.getStatus()); } { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationDocumentationIT.java index 650ab882c36d2..489d4d9b1ed5f 100644 --- 
a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/MigrationDocumentationIT.java @@ -30,6 +30,7 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.client.ESRestHighLevelClientTestCase; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.cluster.health.ClusterHealthStatus; @@ -66,58 +67,22 @@ * -------------------------------------------------- */ public class MigrationDocumentationIT extends ESRestHighLevelClientTestCase { - - public void testCreateIndex() throws IOException { - RestHighLevelClient client = highLevelClient(); - { - //tag::migration-create-index - Settings indexSettings = Settings.builder() // <1> - .put(SETTING_NUMBER_OF_SHARDS, 1) - .put(SETTING_NUMBER_OF_REPLICAS, 0) - .build(); - - String payload = Strings.toString(XContentFactory.jsonBuilder() // <2> - .startObject() - .startObject("settings") // <3> - .value(indexSettings) - .endObject() - .startObject("mappings") // <4> - .startObject("doc") - .startObject("properties") - .startObject("time") - .field("type", "date") - .endObject() - .endObject() - .endObject() - .endObject() - .endObject()); - - HttpEntity entity = new NStringEntity(payload, ContentType.APPLICATION_JSON); // <5> - - Response response = client.getLowLevelClient().performRequest("PUT", "my-index", emptyMap(), entity); // <6> - if (response.getStatusLine().getStatusCode() != HttpStatus.SC_OK) { - // <7> - } - //end::migration-create-index - assertEquals(200, response.getStatusLine().getStatusCode()); - } - } - public void testClusterHealth() throws IOException { RestHighLevelClient client = highLevelClient(); { //tag::migration-cluster-health - Map<String, String> parameters = singletonMap("wait_for_status", "green"); - Response response = client.getLowLevelClient().performRequest("GET", "/_cluster/health", parameters); // <1> + Request request = new Request("GET", "/_cluster/health"); + request.addParameter("wait_for_status", "green"); // <1> + Response response = client.getLowLevelClient().performRequest(request); // <2> ClusterHealthStatus healthStatus; - try (InputStream is = response.getEntity().getContent()) { // <2> - Map<String, Object> map = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); // <3> - healthStatus = ClusterHealthStatus.fromString((String) map.get("status")); // <4> + try (InputStream is = response.getEntity().getContent()) { // <3> + Map<String, Object> map = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); // <4> + healthStatus = ClusterHealthStatus.fromString((String) map.get("status")); // <5> } - if (healthStatus == ClusterHealthStatus.GREEN) { - // <5> + if (healthStatus != ClusterHealthStatus.GREEN) { + // <6> } //end::migration-cluster-health assertSame(ClusterHealthStatus.GREEN, healthStatus); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java index 4400d05a9f820..6fdc60fcb3394 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java @@ -800,7 +800,7 @@ public void
testRankEval() throws Exception { double qualityLevel = evalQuality.getQualityLevel(); // <3> assertEquals(1.0 / 3.0, qualityLevel, 0.0); List<RatedSearchHit> hitsAndRatings = evalQuality.getHitsAndRatings(); - RatedSearchHit ratedSearchHit = hitsAndRatings.get(0); + RatedSearchHit ratedSearchHit = hitsAndRatings.get(2); assertEquals("3", ratedSearchHit.getSearchHit().getId()); // <4> assertFalse(ratedSearchHit.getRating().isPresent()); // <5> MetricDetail metricDetails = evalQuality.getMetricDetails(); diff --git a/client/rest/src/test/java/org/elasticsearch/client/RequestTests.java b/client/rest/src/test/java/org/elasticsearch/client/RequestTests.java index b83115a5341dd..6625c389c6be8 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RequestTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RequestTests.java @@ -26,13 +26,16 @@ import org.apache.http.Header; import org.apache.http.HttpEntity; +import org.apache.http.entity.ByteArrayEntity; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; import org.apache.http.message.BasicHeader; import org.apache.http.nio.entity.NStringEntity; +import org.elasticsearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.fail; @@ -151,6 +154,103 @@ public void testSetHeaders() { assertArrayEquals(headers, request.getHeaders()); } - // TODO equals and hashcode + public void testEqualsAndHashCode() { + Request request = randomRequest(); + assertEquals(request, request); + Request copy = copy(request); + assertEquals(request, copy); + assertEquals(copy, request); + assertEquals(request.hashCode(), copy.hashCode()); + + Request mutant = mutate(request); + assertNotEquals(request, mutant); + assertNotEquals(mutant, request); + } + + private Request randomRequest() { + Request request = new Request( + randomFrom(new String[] {"GET", "PUT", "DELETE", "POST", "HEAD", "OPTIONS"}), + randomAsciiAlphanumOfLength(5)); + + int parameterCount = between(0, 5); + for (int i = 0; i < parameterCount; i++) { + request.addParameter(randomAsciiAlphanumOfLength(i), randomAsciiLettersOfLength(3)); + } + + if (randomBoolean()) { + if (randomBoolean()) { + request.setJsonEntity(randomAsciiAlphanumOfLength(10)); + } else { + request.setEntity(randomFrom(new HttpEntity[] { + new StringEntity(randomAsciiAlphanumOfLength(10), ContentType.APPLICATION_JSON), + new NStringEntity(randomAsciiAlphanumOfLength(10), ContentType.APPLICATION_JSON), + new ByteArrayEntity(randomBytesOfLength(40), ContentType.APPLICATION_JSON) + })); + } + } + + if (randomBoolean()) { + int headerCount = between(1, 5); + Header[] headers = new Header[headerCount]; + for (int i = 0; i < headerCount; i++) { + headers[i] = new BasicHeader(randomAsciiAlphanumOfLength(3), randomAsciiAlphanumOfLength(3)); + } + request.setHeaders(headers); + } + + if (randomBoolean()) { + request.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(1)); + } + + return request; + } + + private Request copy(Request request) { + Request copy = new Request(request.getMethod(), request.getEndpoint()); + copyMutables(request, copy); + return copy; + } + + private Request mutate(Request request) { + if (randomBoolean()) { + // Mutate method or endpoint but keep everything else constant + Request mutant = randomBoolean() + ?
new Request(request.getMethod() + "m", request.getEndpoint()) + : new Request(request.getMethod(), request.getEndpoint() + "m"); + copyMutables(request, mutant); + return mutant; + } + Request mutant = copy(request); + int mutationType = between(0, 3); + switch (mutationType) { + case 0: + mutant.addParameter(randomAsciiAlphanumOfLength(mutant.getParameters().size() + 4), "extra"); + return mutant; + case 1: + mutant.setJsonEntity("mutant"); // randomRequest can't produce this value + return mutant; + case 2: + if (mutant.getHeaders().length > 0) { + mutant.setHeaders(new Header[0]); + } else { + mutant.setHeaders(new BasicHeader("extra", "m")); + } + return mutant; + case 3: + mutant.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(5)); + return mutant; + default: + throw new UnsupportedOperationException("Unknown mutation type [" + mutationType + "]"); + } + } + + private void copyMutables(Request from, Request to) { + for (Map.Entry<String, String> param : from.getParameters().entrySet()) { + to.addParameter(param.getKey(), param.getValue()); + } + to.setEntity(from.getEntity()); + to.setHeaders(from.getHeaders()); + to.setHttpAsyncResponseConsumerFactory(from.getHttpAsyncResponseConsumerFactory()); + } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java index 8a5e782cc09d3..35cac627bbe6a 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java @@ -377,11 +377,13 @@ private Response bodyTest(final RestClient restClient, final String method) thro private Response bodyTest(RestClient restClient, String method, int statusCode, Header[] headers) throws IOException { String requestBody = "{ \"field\": \"value\" }"; - StringEntity entity = new StringEntity(requestBody, ContentType.APPLICATION_JSON); + Request request = new Request(method, "/" + statusCode); + request.setJsonEntity(requestBody); + request.setHeaders(headers); Response esResponse; try { - esResponse = restClient.performRequest(method, "/" + statusCode, Collections.emptyMap(), entity, headers); - } catch (ResponseException e) { + esResponse = restClient.performRequest(request); + } catch(ResponseException e) { esResponse = e.getResponse(); } assertEquals(method, esResponse.getRequestLine().getMethod()); diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java index 2d419b213d686..714d2e57e6d20 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java @@ -58,11 +58,9 @@ import java.net.URI; import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; -import java.util.TreeMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java index 872b327954b02..ea124828e45eb 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java +++
b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -96,7 +96,7 @@ public void onFailure(Exception exception) { } /** - * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testSetParameters()}. + * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testAddParameters()}. */ @Deprecated public void testPerformOldStyleAsyncWithNullParams() throws Exception { diff --git a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/CreatedLocationHeaderIT.java b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/CreatedLocationHeaderIT.java index c61b736bf6db1..74cc251f52c2f 100644 --- a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/CreatedLocationHeaderIT.java +++ b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/CreatedLocationHeaderIT.java @@ -21,18 +21,22 @@ import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; +import org.junit.Before; import java.io.IOException; import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.startsWith; /** * Tests for the "Location" header returned when returning {@code 201 CREATED}. */ public class CreatedLocationHeaderIT extends ESRestTestCase { + public void testCreate() throws IOException { locationTestCase("PUT", "test/test/1"); } @@ -54,8 +58,11 @@ public void testUpsert() throws IOException { private void locationTestCase(String method, String url) throws IOException { locationTestCase(client().performRequest(method, url, emptyMap(), new StringEntity("{\"test\": \"test\"}", ContentType.APPLICATION_JSON))); + // we have to delete the index otherwise the second indexing request will route to the single shard and not produce a 201 + final Response response = client().performRequest(new Request("DELETE", "test")); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); locationTestCase(client().performRequest(method, url + "?routing=cat", emptyMap(), - new StringEntity("{\"test\": \"test\"}", ContentType.APPLICATION_JSON))); + new StringEntity("{\"test\": \"test\"}", ContentType.APPLICATION_JSON))); } private void locationTestCase(Response response) throws IOException { @@ -65,4 +72,5 @@ private void locationTestCase(Response response) throws IOException { Response getResponse = client().performRequest("GET", location); assertEquals(singletonMap("test", "test"), entityAsMap(getResponse).get("_source")); } + } diff --git a/docs/CHANGELOG.asciidoc b/docs/CHANGELOG.asciidoc deleted file mode 100644 index 6eb26fde8f9f8..0000000000000 --- a/docs/CHANGELOG.asciidoc +++ /dev/null @@ -1,257 +0,0 @@ -[[es-release-notes]] -= {es} Release Notes - -[partintro] --- -// To add a release, copy and paste the template text -// and add a link to the new section. Note that release subheads must -// be floated and sections cannot be empty. - -// Use these for links to issue and pulls. Note issues and pulls redirect one to -// each other on Github, so don't worry too much on using the right prefix. -:issue: https://github.com/elastic/elasticsearch/issues/ -:pull: https://github.com/elastic/elasticsearch/pull/ - -This section summarizes the changes in each release. 
- -* <> -* <> -* <> - --- - -//// -// To add a release, copy and paste the following text, uncomment the relevant -// sections, and add a link to the new section in the list of releases at the -// top of the page. Note that release subheads must be floated and sections -// cannot be empty. -// TEMPLATE: - -// [[release-notes-n.n.n]] -// == {es} n.n.n - -//[float] -[[breaking-n.n.n]] -//=== Breaking Changes - -//[float] -//=== Breaking Java Changes - -//[float] -//=== Deprecations - -//[float] -//=== New Features - -//[float] -//=== Enhancements - -//[float] -//=== Bug Fixes - -//[float] -//=== Regressions - -//[float] -//=== Known Issues - -//// - -[[release-notes-7.0.0]] -== {es} 7.0.0 - -coming[7.0.0] - -[float] -[[breaking-7.0.0]] -=== Breaking Changes - -<> ({pull}29609[#29609]) - -<> ({pull}29004[#29004]) -<> ({pull}29635[#29635]) - -<> ({pull}30185[#30185]) - -Machine Learning:: -* The `max_running_jobs` node property is removed in this release. Use the -`xpack.ml.max_open_jobs` setting instead. For more information, see <>. - -* <> ({pull}29601[#29601]) - -//[float] -//=== Breaking Java Changes - -[float] -=== Deprecations -Monitoring:: -* The `xpack.monitoring.collection.interval` setting can no longer be set to `-1` -to disable monitoring data collection. Use `xpack.monitoring.collection.enabled` -and set it to `false` (its default), which was added in 6.3.0. - -Security:: -* The fields returned as part of the mappings section by get index, get -mappings, get field mappings, and field capabilities API are now only the -ones that the user is authorized to access in case field level security is enabled. - -//[float] -//=== New Features - -//[float] -//=== Enhancements - -[float] -=== Bug Fixes - -Use date format in `date_range` mapping before fallback to default ({pull}29310[#29310]) - -Fix NPE in 'more_like_this' when field has zero tokens ({pull}30365[#30365]) - -Fixed prerelease version of elasticsearch in the `deb` package to sort before GA versions -({pull}29000[#29000]) - -Rollup:: -* Validate timezone in range queries to ensure they match the selected job when -searching ({pull}30338[#30338]) - -SQL:: -* Fix parsing of Dates containing milliseconds ({pull}30419[#30419]) - -[float] -=== Regressions -Fail snapshot operations early when creating or deleting a snapshot on a repository that has been -written to by an older Elasticsearch after writing to it with a newer Elasticsearch version. ({pull}30140[#30140]) - -Fix NPE when CumulativeSum agg encounters null value/empty bucket ({pull}29641[#29641]) -Do not fail snapshot when deleting a missing snapshotted file ({pull}30332[#30332]) - -//[float] -//=== Regressions - -//[float] -//=== Known Issues - -[[release-notes-6.4.0]] -== {es} 6.4.0 - -coming[6.4.0] - -//[float] -[[breaking-6.4.0]] -//=== Breaking Changes - -//[float] -//=== Breaking Java Changes - -[float] -=== Deprecations - -Deprecated multi-argument versions of the request methods in the RestClient. -Prefer the "Request" object flavored methods. ({pull}30315[#30315]) - -[float] -=== New Features - -The new <> field allows to know which fields -got ignored at index time because of the <> -option. ({pull}30140[#29658]) - -A new analysis plugin called `analysis_nori` that exposes the Lucene Korean -analysis module. 
({pull}30397[#30397]) - -[float] -=== Enhancements - -{ref-64}/breaking_64_api_changes.html#copy-source-settings-on-resize[Allow -copying source settings on index resize operations] ({pull}30255[#30255], {pull}30404[#30404]) - -Added new "Request" object flavored request methods in the RestClient. Prefer -these instead of the multi-argument versions. ({pull}29623[#29623]) - -Added `setJsonEntity` to `Request` object so it is marginally easier to send JSON. ({pull}30447[#30447]) -Watcher HTTP client used in watches now allows more parallel connections to the -same endpoint and evicts long running connections. ({pull}30130[#30130]) - -The cluster state listener to decide if watcher should be -stopped/started/paused now runs far less code in an executor but is more -synchronous and predictable. Also the trigger engine thread is only started on -data nodes. And the Execute Watch API can be triggered regardless is watcher is -started or stopped. ({pull}30118[#30118]) - -Added put index template API to the high level rest client ({pull}30400[#30400]) - -Add ability to filter coordinating-only nodes when interacting with cluster -APIs. ({pull}30313[#30313]) - -[float] -=== Bug Fixes - -Use date format in `date_range` mapping before fallback to default ({pull}29310[#29310]) - -Fix NPE in 'more_like_this' when field has zero tokens ({pull}30365[#30365]) - -Do not ignore request analysis/similarity settings on index resize operations when the source index already contains such settings ({pull}30216[#30216]) - -Fix NPE when CumulativeSum agg encounters null value/empty bucket ({pull}29641[#29641]) - -Machine Learning:: - -* Account for gaps in data counts after job is reopened ({pull}30294[#30294]) - -Add validation that geohashes are not empty and don't contain unsupported characters ({pull}30376[#30376]) - -Rollup:: -* Validate timezone in range queries to ensure they match the selected job when -searching ({pull}30338[#30338]) - -SQL:: -* Fix parsing of Dates containing milliseconds ({pull}30419[#30419]) - -Allocation:: - -Auto-expand replicas when adding or removing nodes to prevent shard copies from -being dropped and resynced when a data node rejoins the cluster ({pull}30423[#30423]) - -//[float] -//=== Regressions - -//[float] -//=== Known Issues - -[[release-notes-6.3.1]] -== Elasticsearch version 6.3.1 - -coming[6.3.1] - -//[float] -[[breaking-6.3.1]] -//=== Breaking Changes - -//[float] -//=== Breaking Java Changes - -//[float] -//=== Deprecations - -//[float] -//=== New Features - -//[float] -//=== Enhancements - -[float] -=== Bug Fixes - -Reduce the number of object allocations made by {security} when resolving the indices and aliases for a request ({pull}30180[#30180]) - -Respect accept header on requests with no handler ({pull}30383[#30383]) - -SQL:: -* Fix parsing of Dates containing milliseconds ({pull}30419[#30419]) - -//[float] -//=== Regressions - -//[float] -//=== Known Issues diff --git a/docs/java-rest/high-level/migration.asciidoc b/docs/java-rest/high-level/migration.asciidoc index 1349ccb35fe3b..ad4e0613fc14a 100644 --- a/docs/java-rest/high-level/migration.asciidoc +++ b/docs/java-rest/high-level/migration.asciidoc @@ -2,7 +2,7 @@ == Migration Guide This section describes how to migrate existing code from the `TransportClient` -to the new Java High Level REST Client released with the version 5.6.0 +to the Java High Level REST Client released with the version 5.6.0 of Elasticsearch. 
=== Motivations around a new Java client @@ -107,9 +107,6 @@ More importantly, the high-level client: request constructors like `new IndexRequest()` to create requests objects. The requests are then executed using synchronous or asynchronous dedicated methods like `client.index()` or `client.indexAsync()`. -- does not provide indices or cluster management APIs. Management -operations can be executed by external scripts or -<>. ==== How to migrate the way requests are built @@ -241,71 +238,6 @@ returned by the cluster. <4> The `onFailure()` method is called when an error occurs during the execution of the request. -[[java-rest-high-level-migration-manage-indices]] -==== Manage Indices using the Low-Level REST Client - -The low-level client is able to execute any kind of HTTP requests, and can -therefore be used to call the APIs that are not yet supported by the high level client. - -For example, creating a new index with the `TransportClient` may look like this: - -[source,java] --------------------------------------------------- -Settings settings = Settings.builder() // <1> - .put(SETTING_NUMBER_OF_SHARDS, 1) - .put(SETTING_NUMBER_OF_REPLICAS, 0) - .build(); - -String mappings = XContentFactory.jsonBuilder() // <2> - .startObject() - .startObject("doc") - .startObject("properties") - .startObject("time") - .field("type", "date") - .endObject() - .endObject() - .endObject() - .endObject() - .string(); - -CreateIndexResponse response = transportClient.admin().indices() // <3> - .prepareCreate("my-index") - .setSettings(indexSettings) - .addMapping("doc", docMapping, XContentType.JSON) - .get(); - -if (response.isAcknowledged() == false) { - // <4> -} --------------------------------------------------- -<1> Define the settings of the index -<2> Define the mapping for document of type `doc` using a -`XContentBuilder` -<3> Create the index with the previous settings and mapping -using the `prepareCreate()` method. The execution is synchronous -and blocks on the `get()` method until the remote cluster returns -a response. -<4> Handle the situation where the index has not been created - -The same operation executed with the low-level client could be: - -["source","java",subs="attributes,callouts,macros"] --------------------------------------------------- -include-tagged::{doc-tests}/MigrationDocumentationIT.java[migration-create-index] --------------------------------------------------- -<1> Define the settings of the index -<2> Define the body of the HTTP request using a `XContentBuilder` with JSON format -<3> Include the settings in the request body -<4> Include the mappings in the request body -<5> Convert the request body from `String` to a `HttpEntity` and -set its content type (here, JSON) -<6> Execute the request using the low-level client. The execution is synchronous -and blocks on the `performRequest()` method until the remote cluster returns -a response. The low-level client can be retrieved from an existing `RestHighLevelClient` -instance through the `getLowLevelClient` getter method. 
-<7> Handle the situation where the index has not been created - - [[java-rest-high-level-migration-cluster-health]] ==== Checking Cluster Health using the Low-Level REST Client @@ -331,18 +263,18 @@ With the low-level client, the code can be changed to: -------------------------------------------------- include-tagged::{doc-tests}/MigrationDocumentationIT.java[migration-cluster-health] -------------------------------------------------- -<1> Call the cluster's health REST endpoint and wait for the cluster health to become green, -then get back a `Response` object. -<2> Retrieve an `InputStream` object in order to read the response's content -<3> Parse the response's content using Elasticsearch's helper class `XContentHelper`. This +<1> Set up the request to wait for the cluster's health to become green if it isn't already. +<2> Make the request and get back a `Response` object. +<3> Retrieve an `InputStream` object in order to read the response's content +<4> Parse the response's content using Elasticsearch's helper class `XContentHelper`. This helper requires the content type of the response to be passed as an argument and returns a `Map` of objects. Values in the map can be of any type, including inner `Map` that are used to represent the JSON object hierarchy. -<4> Retrieve the value of the `status` field in the response map, casts it as a a `String` +<5> Retrieve the value of the `status` field in the response map, cast it as a `String` object and use the `ClusterHealthStatus.fromString()` method to convert it to a `ClusterHealthStatus` object. This method throws an exception if the value does not correspond to a valid cluster health status. -<5> Handle the situation where the cluster's health is not green +<6> Handle the situation where the cluster's health is not green Note that for convenience this example uses Elasticsearch's helpers to parse the JSON response body, but any other JSON parser could have been used instead.
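The client-side changes above all follow the same migration pattern: the deprecated multi-argument `performRequest(method, endpoint, params, entity, headers)` calls are replaced by a single `Request` object that carries the method, endpoint, parameters, and body. A minimal sketch of the two styles side by side (assuming an already-built `RestClient` named `restClient`; the endpoint and parameter are just the ones from the migration example above):

[source,java]
--------------------------------------------------
import java.io.IOException;
import java.util.Collections;

import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class RequestStyleSketch {

    // Deprecated multi-argument flavor: method, endpoint, and parameters
    // travel as separate arguments.
    static Response oldStyle(RestClient restClient) throws IOException {
        return restClient.performRequest("GET", "/_cluster/health",
                Collections.singletonMap("wait_for_status", "green"));
    }

    // Request object flavor: build the request first, then execute it.
    static Response newStyle(RestClient restClient) throws IOException {
        Request request = new Request("GET", "/_cluster/health");
        request.addParameter("wait_for_status", "green");
        // For requests with a body, setJsonEntity(json) sets the entity
        // and its application/json content type in one call.
        return restClient.performRequest(request);
    }
}
--------------------------------------------------

The same `Request` object is shared with the async flavor of `performRequest`, which is what lets the deprecated overloads be removed wholesale in 7.0.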
diff --git a/docs/reference/aggregations/bucket/children-aggregation.asciidoc b/docs/reference/aggregations/bucket/children-aggregation.asciidoc index e616359e8a818..3805b2e564ca4 100644 --- a/docs/reference/aggregations/bucket/children-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/children-aggregation.asciidoc @@ -137,8 +137,8 @@ Possible response: "took": 25, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, diff --git a/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc b/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc index 59cadf1518eba..7dd5dca61b9e4 100644 --- a/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc @@ -60,8 +60,8 @@ The response for the above aggregation: "aggregations": { "centroid": { "location": { - "lat": 51.00982963806018, - "lon": 3.9662131061777472 + "lat": 51.009829603135586, + "lon": 3.9662130642682314 }, "count": 6 } @@ -113,8 +113,8 @@ The response for the above aggregation: "doc_count": 3, "centroid": { "location": { - "lat": 52.371655656024814, - "lon": 4.909563297405839 + "lat": 52.371655642054975, + "lon": 4.9095632415264845 }, "count": 3 } @@ -125,7 +125,7 @@ The response for the above aggregation: "centroid": { "location": { "lat": 48.86055548675358, - "lon": 2.3316944623366 + "lon": 2.331694420427084 }, "count": 2 } diff --git a/docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc b/docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc index 53c7d913ad2f1..cc873a4fe89ff 100644 --- a/docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc @@ -9,20 +9,6 @@ Input text is lowercased, normalized to remove extended characters, sorted, deduplicated and concatenated into a single token. If a stopword list is configured, stop words will also be removed. -[float] -=== Definition - -It consists of: - -Tokenizer:: -* <> - -Token Filters (in order):: -1. <> -2. <> -3. <> (disabled by default) -4. <> - [float] === Example output @@ -149,3 +135,46 @@ The above example produces the following term: --------------------------- [ consistent godel said sentence yes ] --------------------------- + +[float] +=== Definition + +The `fingerprint` analyzer consists of: + +Tokenizer:: +* <> + +Token Filters (in order):: +* <> +* <> +* <> (disabled by default) +* <> + +If you need to customize the `fingerprint` analyzer beyond the configuration +parameters then you need to recreate it as a `custom` analyzer and modify +it, usually by adding token filters.
This would recreate the built-in +`fingerprint` analyzer and you can use it as a starting point for further +customization: + +[source,js] +---------------------------------------------------- +PUT /fingerprint_example +{ + "settings": { + "analysis": { + "analyzer": { + "rebuilt_fingerprint": { + "tokenizer": "standard", + "filter": [ + "lowercase", + "asciifolding", + "fingerprint" + ] + } + } + } + } +} +---------------------------------------------------- +// CONSOLE +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: fingerprint_example, first: fingerprint, second: rebuilt_fingerprint}\nendyaml\n/] diff --git a/docs/reference/analysis/analyzers/keyword-analyzer.asciidoc b/docs/reference/analysis/analyzers/keyword-analyzer.asciidoc index cc94f3b757e37..954b514ced605 100644 --- a/docs/reference/analysis/analyzers/keyword-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/keyword-analyzer.asciidoc @@ -4,14 +4,6 @@ The `keyword` analyzer is a ``noop'' analyzer which returns the entire input string as a single token. -[float] -=== Definition - -It consists of: - -Tokenizer:: -* <> - [float] === Example output @@ -57,3 +49,40 @@ The above sentence would produce the following single term: === Configuration The `keyword` analyzer is not configurable. + +[float] +=== Definition + +The `keyword` analyzer consists of: + +Tokenizer:: +* <> + +If you need to customize the `keyword` analyzer then you need to +recreate it as a `custom` analyzer and modify it, usually by adding +token filters. Usually, you should prefer the +<> when you want strings that are not split +into tokens, but just in case you need it, this would recreate the +built-in `keyword` analyzer and you can use it as a starting point +for further customization: + +[source,js] +---------------------------------------------------- +PUT /keyword_example +{ + "settings": { + "analysis": { + "analyzer": { + "rebuilt_keyword": { + "tokenizer": "keyword", + "filter": [ <1> + ] + } + } + } + } +} +---------------------------------------------------- +// CONSOLE +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: keyword_example, first: keyword, second: rebuilt_keyword}\nendyaml\n/] +<1> You'd add any token filters here. diff --git a/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc b/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc index 64ab3999ef9a9..027f37280a67d 100644 --- a/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc @@ -19,19 +19,6 @@ Read more about http://www.regular-expressions.info/catastrophic.html[pathologic ======================================== - -[float] -=== Definition - -It consists of: - -Tokenizer:: -* <> - -Token Filters:: -* <> -* <> (disabled by default) - [float] === Example output @@ -378,3 +365,51 @@ The regex above is easier to understand as: [\p{L}&&[^\p{Lu}]] # then lower case ) -------------------------------------------------- + +[float] +=== Definition + +The `pattern` analyzer consists of: + +Tokenizer:: +* <> + +Token Filters:: +* <> +* <> (disabled by default) + +If you need to customize the `pattern` analyzer beyond the configuration +parameters then you need to recreate it as a `custom` analyzer and modify +it, usually by adding token filters.
This would recreate the built-in +`pattern` analyzer and you can use it as a starting point for further +customization: + +[source,js] +---------------------------------------------------- +PUT /pattern_example +{ + "settings": { + "analysis": { + "tokenizer": { + "split_on_non_word": { + "type": "pattern", + "pattern": "\\W+" <1> + } + }, + "analyzer": { + "rebuilt_pattern": { + "tokenizer": "split_on_non_word", + "filter": [ + "lowercase" <2> + ] + } + } + } + } +} +---------------------------------------------------- +// CONSOLE +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: pattern_example, first: pattern, second: rebuilt_pattern}\nendyaml\n/] +<1> The default pattern is `\W+` which splits on non-word characters +and this is where you'd change it. +<2> You'd add other token filters after `lowercase`. diff --git a/docs/reference/analysis/analyzers/simple-analyzer.asciidoc b/docs/reference/analysis/analyzers/simple-analyzer.asciidoc index a57c30d8dd622..d82655d9bd8e1 100644 --- a/docs/reference/analysis/analyzers/simple-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/simple-analyzer.asciidoc @@ -4,14 +4,6 @@ The `simple` analyzer breaks text into terms whenever it encounters a character which is not a letter. All terms are lower cased. -[float] -=== Definition - -It consists of: - -Tokenizer:: -* <> - [float] === Example output @@ -127,3 +119,37 @@ The above sentence would produce the following terms: === Configuration The `simple` analyzer is not configurable. + +[float] +=== Definition + +The `simple` analyzer consists of: + +Tokenizer:: +* <> + +If you need to customize the `simple` analyzer then you need to recreate +it as a `custom` analyzer and modify it, usually by adding token filters. +This would recreate the built-in `simple` analyzer and you can use it as +a starting point for further customization: + +[source,js] +---------------------------------------------------- +PUT /simple_example +{ + "settings": { + "analysis": { + "analyzer": { + "rebuilt_simple": { + "tokenizer": "lowercase", + "filter": [ <1> + ] + } + } + } + } +} +---------------------------------------------------- +// CONSOLE +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: simple_example, first: simple, second: rebuilt_simple}\nendyaml\n/] +<1> You'd add any token filters here. diff --git a/docs/reference/analysis/analyzers/standard-analyzer.asciidoc b/docs/reference/analysis/analyzers/standard-analyzer.asciidoc index eacbb1c3cad99..20aa072066b5f 100644 --- a/docs/reference/analysis/analyzers/standard-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/standard-analyzer.asciidoc @@ -7,19 +7,6 @@ Segmentation algorithm, as specified in http://unicode.org/reports/tr29/[Unicode Standard Annex #29]) and works well for most languages. -[float] -=== Definition - -It consists of: - -Tokenizer:: -* <> - -Token Filters:: -* <> -* <> -* <> (disabled by default) - [float] === Example output @@ -276,3 +263,44 @@ The above example produces the following terms: --------------------------- [ 2, quick, brown, foxes, jumpe, d, over, lazy, dog's, bone ] --------------------------- + +[float] +=== Definition + +The `standard` analyzer consists of: + +Tokenizer:: +* <> + +Token Filters:: +* <> +* <> +* <> (disabled by default) + +If you need to customize the `standard` analyzer beyond the configuration +parameters then you need to recreate it as a `custom` analyzer and modify +it, usually by adding token filters.
This would recreate the built-in +`standard` analyzer and you can use it as a starting point: + +[source,js] +---------------------------------------------------- +PUT /standard_example +{ + "settings": { + "analysis": { + "analyzer": { + "rebuilt_standard": { + "tokenizer": "standard", + "filter": [ + "standard", + "lowercase" <1> + ] + } + } + } + } +} +---------------------------------------------------- +// CONSOLE +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: standard_example, first: standard, second: rebuilt_standard}\nendyaml\n/] +<1> You'd add any token filters after `lowercase`. diff --git a/docs/reference/analysis/analyzers/stop-analyzer.asciidoc b/docs/reference/analysis/analyzers/stop-analyzer.asciidoc index eacc7e106e799..1b84797d94761 100644 --- a/docs/reference/analysis/analyzers/stop-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/stop-analyzer.asciidoc @@ -5,17 +5,6 @@ The `stop` analyzer is the same as the <> - -Token filters:: -* <> - [float] === Example output @@ -239,3 +228,50 @@ The above example produces the following terms: --------------------------- [ quick, brown, foxes, jumped, lazy, dog, s, bone ] --------------------------- + +[float] +=== Definition + +It consists of: + +Tokenizer:: +* <> + +Token filters:: +* <> + +If you need to customize the `stop` analyzer beyond the configuration +parameters then you need to recreate it as a `custom` analyzer and modify +it, usually by adding token filters. This would recreate the built-in +`stop` analyzer and you can use it as a starting point for further +customization: + +[source,js] +---------------------------------------------------- +PUT /stop_example +{ + "settings": { + "analysis": { + "filter": { + "english_stop": { + "type": "stop", + "stopwords": "_english_" <1> + } + }, + "analyzer": { + "rebuilt_stop": { + "tokenizer": "lowercase", + "filter": [ + "english_stop" <2> + ] + } + } + } + } +} +---------------------------------------------------- +// CONSOLE +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: stop_example, first: stop, second: rebuilt_stop}\nendyaml\n/] +<1> The default stopwords can be overridden with the `stopwords` + or `stopwords_path` parameters. +<2> You'd add any token filters after `english_stop`. diff --git a/docs/reference/analysis/analyzers/whitespace-analyzer.asciidoc b/docs/reference/analysis/analyzers/whitespace-analyzer.asciidoc index f95e5c6e4ab65..31ba8d9ce8f24 100644 --- a/docs/reference/analysis/analyzers/whitespace-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/whitespace-analyzer.asciidoc @@ -4,14 +4,6 @@ The `whitespace` analyzer breaks text into terms whenever it encounters a whitespace character. -[float] -=== Definition - -It consists of: - -Tokenizer:: -* <> - [float] === Example output @@ -120,3 +112,37 @@ The above sentence would produce the following terms: === Configuration The `whitespace` analyzer is not configurable. + +[float] +=== Definition + +It consists of: + +Tokenizer:: +* <> + +If you need to customize the `whitespace` analyzer then you need to +recreate it as a `custom` analyzer and modify it, usually by adding +token filters. 
This would recreate the built-in `whitespace` analyzer +and you can use it as a starting point for further customization: + +[source,js] +---------------------------------------------------- +PUT /whitespace_example +{ + "settings": { + "analysis": { + "analyzer": { + "rebuilt_whitespace": { + "tokenizer": "whitespace", + "filter": [ <1> + ] + } + } + } + } +} +---------------------------------------------------- +// CONSOLE +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: whitespace_example, first: whitespace, second: rebuilt_whitespace}\nendyaml\n/] +<1> You'd add any token filters here. diff --git a/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc b/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc index 6e881121a0f67..3da1c60db0552 100644 --- a/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc +++ b/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc @@ -235,8 +235,8 @@ The output from the above is: "timed_out": false, "took": $body.took, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, diff --git a/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc index 3cf1f8403e230..9b6861627be40 100644 --- a/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc +++ b/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc @@ -294,8 +294,8 @@ GET my_index/_search "took": $body.took, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc index e2824bb528584..42216a9a0fc14 100644 --- a/docs/reference/api-conventions.asciidoc +++ b/docs/reference/api-conventions.asciidoc @@ -300,11 +300,7 @@ Responds: "indices": { "twitter": { "shards": { - "0": [{"state": "STARTED"}, {"state": "UNASSIGNED"}], - "1": [{"state": "STARTED"}, {"state": "UNASSIGNED"}], - "2": [{"state": "STARTED"}, {"state": "UNASSIGNED"}], - "3": [{"state": "STARTED"}, {"state": "UNASSIGNED"}], - "4": [{"state": "STARTED"}, {"state": "UNASSIGNED"}] + "0": [{"state": "STARTED"}, {"state": "UNASSIGNED"}] } } } diff --git a/docs/reference/cat/allocation.asciidoc b/docs/reference/cat/allocation.asciidoc index 3719758ff58e9..a9de182e3c00e 100644 --- a/docs/reference/cat/allocation.asciidoc +++ b/docs/reference/cat/allocation.asciidoc @@ -16,7 +16,7 @@ Might respond with: [source,txt] -------------------------------------------------- shards disk.indices disk.used disk.avail disk.total disk.percent host ip node - 5 260b 47.3gb 43.4gb 100.7gb 46 127.0.0.1 127.0.0.1 CSUXak2 + 1 260b 47.3gb 43.4gb 100.7gb 46 127.0.0.1 127.0.0.1 CSUXak2 -------------------------------------------------- // TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/ s/46/\\d+/] // TESTRESPONSE[s/CSUXak2/.+/ _cat] diff --git a/docs/reference/cat/health.asciidoc b/docs/reference/cat/health.asciidoc index ca2a1838adb02..5f053edf30866 100644 --- a/docs/reference/cat/health.asciidoc +++ b/docs/reference/cat/health.asciidoc @@ -14,7 +14,7 @@ GET /_cat/health?v [source,txt] -------------------------------------------------- epoch timestamp cluster status node.total node.data shards pri relo init unassign pending_tasks max_task_wait_time active_shards_percent -1475871424 16:17:04 elasticsearch green 1 1 5 5 0 0 0 0 - 100.0% +1475871424 16:17:04 elasticsearch 
green 1 1 1 1 0 0 0 0 - 100.0% -------------------------------------------------- // TESTRESPONSE[s/1475871424 16:17:04/\\d+ \\d+:\\d+:\\d+/] // TESTRESPONSE[s/elasticsearch/[^ ]+/ s/0 -/\\d+ (-|\\d+(\\.\\d+)?[ms]+)/ _cat] @@ -33,7 +33,7 @@ which looks like: [source,txt] -------------------------------------------------- cluster status node.total node.data shards pri relo init unassign pending_tasks max_task_wait_time active_shards_percent -elasticsearch green 1 1 5 5 0 0 0 0 - 100.0% +elasticsearch green 1 1 1 1 0 0 0 0 - 100.0% -------------------------------------------------- // TESTRESPONSE[s/elasticsearch/[^ ]+/ s/0 -/\\d+ (-|\\d+(\\.\\d+)?[ms]+)/ _cat] diff --git a/docs/reference/cat/indices.asciidoc b/docs/reference/cat/indices.asciidoc index 3a50a836d0fdb..2a5b865fefa47 100644 --- a/docs/reference/cat/indices.asciidoc +++ b/docs/reference/cat/indices.asciidoc @@ -18,7 +18,7 @@ Might respond with: -------------------------------------------------- health status index uuid pri rep docs.count docs.deleted store.size pri.store.size yellow open twitter u8FNjxh8Rfy_awN11oDKYQ 1 1 1200 0 88.1kb 88.1kb -green open twitter2 nYFWZEO7TUiOjLQXBaYJpA 5 0 0 0 260b 260b +green open twitter2 nYFWZEO7TUiOjLQXBaYJpA 1 0 0 0 260b 260b -------------------------------------------------- // TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/] // TESTRESPONSE[s/u8FNjxh8Rfy_awN11oDKYQ|nYFWZEO7TUiOjLQXBaYJpA/.+/ _cat] @@ -81,7 +81,7 @@ Which looks like: -------------------------------------------------- health status index uuid pri rep docs.count docs.deleted store.size pri.store.size yellow open twitter u8FNjxh8Rfy_awN11oDKYQ 1 1 1200 0 88.1kb 88.1kb -green open twitter2 nYFWZEO7TUiOjLQXBaYJpA 5 0 0 0 260b 260b +green open twitter2 nYFWZEO7TUiOjLQXBaYJpA 1 0 0 0 260b 260b -------------------------------------------------- // TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/] // TESTRESPONSE[s/u8FNjxh8Rfy_awN11oDKYQ|nYFWZEO7TUiOjLQXBaYJpA/.+/ _cat] diff --git a/docs/reference/cat/segments.asciidoc b/docs/reference/cat/segments.asciidoc index 88fb18b363745..a4c2c54d8eefd 100644 --- a/docs/reference/cat/segments.asciidoc +++ b/docs/reference/cat/segments.asciidoc @@ -17,8 +17,8 @@ might look like: ["source","txt",subs="attributes,callouts"] -------------------------------------------------- index shard prirep ip segment generation docs.count docs.deleted size size.memory committed searchable version compound -test 4 p 127.0.0.1 _0 0 1 0 3kb 2042 false true {lucene_version} true -test1 4 p 127.0.0.1 _0 0 1 0 3kb 2042 false true {lucene_version} true +test 0 p 127.0.0.1 _0 0 1 0 3kb 2042 false true {lucene_version} true +test1 0 p 127.0.0.1 _0 0 1 0 3kb 2042 false true {lucene_version} true -------------------------------------------------- // TESTRESPONSE[s/3kb/\\d+(\\.\\d+)?[mk]?b/ s/2042/\\d+/ _cat] diff --git a/docs/reference/cluster/health.asciidoc b/docs/reference/cluster/health.asciidoc index 6cc99a25476d9..87c4e17f452ce 100644 --- a/docs/reference/cluster/health.asciidoc +++ b/docs/reference/cluster/health.asciidoc @@ -3,7 +3,7 @@ The cluster health API allows to get a very simple status on the health of the cluster. 
For example, on a quiet single node cluster with a single index -with 5 shards and one replica, this: +with one shard and one replica, this: [source,js] -------------------------------------------------- @@ -22,11 +22,11 @@ Returns this: "timed_out" : false, "number_of_nodes" : 1, "number_of_data_nodes" : 1, - "active_primary_shards" : 5, - "active_shards" : 5, + "active_primary_shards" : 1, + "active_shards" : 1, "relocating_shards" : 0, "initializing_shards" : 0, - "unassigned_shards" : 5, + "unassigned_shards" : 1, "delayed_unassigned_shards": 0, "number_of_pending_tasks" : 0, "number_of_in_flight_fetch": 0,
diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index 937917823f5a6..d684be80c00b8 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -95,7 +95,7 @@ Replication is important for two primary reasons: To summarize, each index can be split into multiple shards. An index can also be replicated zero (meaning no replicas) or more times. Once replicated, each index will have primary shards (the original shards that were replicated from) and replica shards (the copies of the primary shards). The number of shards and replicas can be defined per index at the time the index is created. After the index is created, you may change the number of replicas dynamically anytime but you cannot change the number of shards after-the-fact. -By default, each index in Elasticsearch is allocated 5 primary shards and 1 replica which means that if you have at least two nodes in your cluster, your index will have 5 primary shards and another 5 replica shards (1 complete replica) for a total of 10 shards per index. +By default, each index in Elasticsearch is allocated one primary shard and one replica which means that if you have at least two nodes in your cluster, your index will have one primary shard and another replica shard (one complete replica) for a total of two shards per index. NOTE: Each Elasticsearch shard is a Lucene index. There is a maximum number of documents you can have in a single Lucene index. As of https://issues.apache.org/jira/browse/LUCENE-5843[`LUCENE-5843`], the limit is `2,147,483,519` (= Integer.MAX_VALUE - 128) documents. You can monitor shard sizes using the {ref}/cat-shards.html[`_cat/shards`] API. @@ -366,11 +366,11 @@ And the response: [source,txt] -------------------------------------------------- health status index uuid pri rep docs.count docs.deleted store.size pri.store.size -yellow open customer 95SQ4TSUT7mWBT7VNHH67A 5 1 0 0 260b 260b +yellow open customer 95SQ4TSUT7mWBT7VNHH67A 1 1 0 0 260b 260b -------------------------------------------------- // TESTRESPONSE[s/95SQ4TSUT7mWBT7VNHH67A/.+/ s/260b/\\d+\\.?\\d?k?b/ _cat] -The results of the second command tells us that we now have 1 index named customer and it has 5 primary shards and 1 replica (the defaults) and it contains 0 documents in it. +The results of the second command tell us that we now have one index named customer and it has one primary shard and one replica (the defaults) and it contains zero documents in it. You might also notice that the customer index has a yellow health tagged to it. Recall from our previous discussion that yellow means that some replicas are not (yet) allocated. The reason this happens for this index is because Elasticsearch by default created one replica for this index.
Since we only have one node running at the moment, that one replica cannot yet be allocated (for high availability) until a later point in time when another node joins the cluster. Once that replica gets allocated onto a second node, the health status for this index will turn to green.
diff --git a/docs/reference/glossary.asciidoc b/docs/reference/glossary.asciidoc index 53164d366cd93..c6b9309fa3240 100644 --- a/docs/reference/glossary.asciidoc +++ b/docs/reference/glossary.asciidoc @@ -105,12 +105,13 @@ you index a document, it is indexed first on the primary shard, then on all <> of the primary shard. + - By default, an <> has 5 primary shards. You can - specify fewer or more primary shards to scale the number of - <> that your index can handle. + By default, an <> has one primary shard. You can specify + more primary shards to scale the number of <> + that your index can handle. + - You cannot change the number of primary shards in an index, once the - index is created. + You cannot change the number of primary shards in an index, once the index + is created. However, an index can be split into a new index using the + <>. + See also <>
diff --git a/docs/reference/how-to/recipes/stemming.asciidoc b/docs/reference/how-to/recipes/stemming.asciidoc index 4e12dfd7ecad4..37901cb3abe62 100644 --- a/docs/reference/how-to/recipes/stemming.asciidoc +++ b/docs/reference/how-to/recipes/stemming.asciidoc @@ -78,31 +78,31 @@ GET index/_search "took": 2, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, "hits": { "total": 2, - "max_score": 0.2876821, + "max_score": 0.18232156, "hits": [ { "_index": "index", "_type": "_doc", - "_id": "2", - "_score": 0.2876821, + "_id": "1", + "_score": 0.18232156, "_source": { - "body": "A pair of skis" + "body": "Ski resort" } }, { "_index": "index", "_type": "_doc", - "_id": "1", - "_score": 0.2876821, + "_id": "2", + "_score": 0.18232156, "_source": { - "body": "Ski resort" + "body": "A pair of skis" } } ] @@ -136,20 +136,20 @@ GET index/_search "took": 1, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, "hits": { "total": 1, - "max_score": 0.2876821, + "max_score": 0.80259144, "hits": [ { "_index": "index", "_type": "_doc", "_id": "1", - "_score": 0.2876821, + "_score": 0.80259144, "_source": { "body": "Ski resort" } @@ -193,20 +193,20 @@ GET index/_search "took": 2, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, "hits": { "total": 1, - "max_score": 0.2876821, + "max_score": 0.80259144, "hits": [ { "_index": "index", "_type": "_doc", "_id": "1", - "_score": 0.2876821, + "_score": 0.80259144, "_source": { "body": "Ski resort" }
diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index ed0077a629d7c..54c0c1c1b157c 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -103,9 +103,14 @@ specific index module: `index.auto_expand_replicas`:: - Auto-expand the number of replicas based on the number of available nodes. + Auto-expand the number of replicas based on the number of data nodes in the cluster. Set to a dash delimited lower and upper bound (e.g. `0-5`) or use `all` - for the upper bound (e.g. `0-all`). Defaults to `false` (i.e. disabled). + for the upper bound (e.g. `0-all`). Defaults to `false` (i.e. disabled).
+ Note that the auto-expanded number of replicas does not take any other allocation + rules into account, such as <>, + <> or <>, + and this can lead to the cluster health becoming `YELLOW` if the applicable rules + prevent all the replicas from being allocated. `index.search.idle.after`:: How long a shard can not receive a search or get request until it's considered diff --git a/docs/reference/index-shared4.asciidoc b/docs/reference/index-shared4.asciidoc index 5e6ebc8a5a20c..3dfb3b641890f 100644 --- a/docs/reference/index-shared4.asciidoc +++ b/docs/reference/index-shared4.asciidoc @@ -5,4 +5,6 @@ include::testing.asciidoc[] include::glossary.asciidoc[] +include::release-notes/highlights.asciidoc[] + include::{docdir}/../CHANGELOG.asciidoc[] \ No newline at end of file diff --git a/docs/reference/indices/flush.asciidoc b/docs/reference/indices/flush.asciidoc index db1f7c2fe00a9..8583afc96ab1f 100644 --- a/docs/reference/indices/flush.asciidoc +++ b/docs/reference/indices/flush.asciidoc @@ -106,11 +106,7 @@ which returns something similar to: "num_docs" : 0 } } - ], - "1": ..., - "2": ..., - "3": ..., - "4": ... + ] } } } @@ -120,10 +116,6 @@ which returns something similar to: // TESTRESPONSE[s/"translog_uuid" : "hnOG3xFcTDeoI_kvvvOdNA"/"translog_uuid": $body.indices.twitter.shards.0.0.commit.user_data.translog_uuid/] // TESTRESPONSE[s/"history_uuid" : "XP7KDJGiS1a2fHYiFL5TXQ"/"history_uuid": $body.indices.twitter.shards.0.0.commit.user_data.history_uuid/] // TESTRESPONSE[s/"sync_id" : "AVvFY-071siAOuFGEO9P"/"sync_id": $body.indices.twitter.shards.0.0.commit.user_data.sync_id/] -// TESTRESPONSE[s/"1": \.\.\./"1": $body.indices.twitter.shards.1/] -// TESTRESPONSE[s/"2": \.\.\./"2": $body.indices.twitter.shards.2/] -// TESTRESPONSE[s/"3": \.\.\./"3": $body.indices.twitter.shards.3/] -// TESTRESPONSE[s/"4": \.\.\./"4": $body.indices.twitter.shards.4/] <1> the `sync id` marker [float] diff --git a/docs/reference/indices/shrink-index.asciidoc b/docs/reference/indices/shrink-index.asciidoc index 496ae7253ce9c..34e90e6799d78 100644 --- a/docs/reference/indices/shrink-index.asciidoc +++ b/docs/reference/indices/shrink-index.asciidoc @@ -42,7 +42,7 @@ PUT /my_source_index/_settings } -------------------------------------------------- // CONSOLE -// TEST[s/^/PUT my_source_index\n/] +// TEST[s/^/PUT my_source_index\n{"settings":{"index.number_of_shards":2}}\n/] <1> Forces the relocation of a copy of each shard to the node with name `shrink_node_name`. See <> for more options. @@ -119,7 +119,7 @@ POST my_source_index/_shrink/my_target_index?copy_settings=true } -------------------------------------------------- // CONSOLE -// TEST[s/^/PUT my_source_index\n{"settings": {"index.blocks.write": true}}\n/] +// TEST[s/^/PUT my_source_index\n{"settings": {"index.number_of_shards":5,"index.blocks.write": true}}\n/] <1> The number of shards in the target index. This must be a factor of the number of shards in the source index. diff --git a/docs/reference/mapping/params/normalizer.asciidoc b/docs/reference/mapping/params/normalizer.asciidoc index 723f79c5dc499..3688a0e945414 100644 --- a/docs/reference/mapping/params/normalizer.asciidoc +++ b/docs/reference/mapping/params/normalizer.asciidoc @@ -83,31 +83,31 @@ both index and query time. 
"took": $body.took, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, "hits": { "total": 2, - "max_score": 0.2876821, + "max_score": 0.47000363, "hits": [ { "_index": "index", "_type": "_doc", - "_id": "2", - "_score": 0.2876821, + "_id": "1", + "_score": 0.47000363, "_source": { - "foo": "bar" + "foo": "BÀR" } }, { "_index": "index", "_type": "_doc", - "_id": "1", - "_score": 0.2876821, + "_id": "2", + "_score": 0.47000363, "_source": { - "foo": "BÀR" + "foo": "bar" } } ] @@ -144,8 +144,8 @@ returns "took": 43, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, diff --git a/docs/reference/mapping/types/percolator.asciidoc b/docs/reference/mapping/types/percolator.asciidoc index b5226b53ba0c7..066d3ce1ac597 100644 --- a/docs/reference/mapping/types/percolator.asciidoc +++ b/docs/reference/mapping/types/percolator.asciidoc @@ -194,8 +194,8 @@ now returns matches from the new index: "took": 3, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, @@ -389,8 +389,8 @@ This results in a response like this: "took": 6, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, @@ -549,8 +549,8 @@ GET /my_queries1/_search "took": 6, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped": 0, "failed": 0 }, diff --git a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc index a1b427acf2718..21a689703e01e 100644 --- a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc +++ b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc @@ -329,3 +329,16 @@ and will not match any documents for this query. This can be useful when querying multiple indexes which might have different mappings. When set to `false` (the default value) the query will throw an exception if the field is not mapped. + +[float] +==== Notes on Precision + +Geopoints have limited precision and are always rounded down during index time. +During the query time, upper boundaries of the bounding boxes are rounded down, +while lower boundaries are rounded up. As a result, the points along on the +lower bounds (bottom and left edges of the bounding box) might not make it into +the bounding box due to the rounding error. At the same time points alongside +the upper bounds (top and right edges) might be selected by the query even if +they are located slightly outside the edge. The rounding error should be less +than 4.20e-8 degrees on the latitude and less than 8.39e-8 degrees on the +longitude, which translates to less than 1cm error even at the equator. 
diff --git a/docs/reference/query-dsl/percolate-query.asciidoc b/docs/reference/query-dsl/percolate-query.asciidoc index b6e465e34dfd4..0d2661c37b862 100644 --- a/docs/reference/query-dsl/percolate-query.asciidoc +++ b/docs/reference/query-dsl/percolate-query.asciidoc @@ -83,8 +83,8 @@ The above request will yield the following response: "took": 13, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, @@ -227,8 +227,8 @@ GET /my-index/_search "took": 13, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, @@ -299,7 +299,7 @@ Index response: "failed": 0 }, "result": "created", - "_seq_no" : 0, + "_seq_no" : 1, "_primary_term" : 1 } -------------------------------------------------- @@ -407,8 +407,8 @@ This will yield the following response. "took": 7, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, @@ -512,8 +512,8 @@ The slightly different response: "took": 13, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, @@ -608,8 +608,8 @@ The above search request returns a response similar to this: "took": 13, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, diff --git a/docs/reference/query-dsl/terms-set-query.asciidoc b/docs/reference/query-dsl/terms-set-query.asciidoc index 35bc17e1f0fac..29b349c3b7adb 100644 --- a/docs/reference/query-dsl/terms-set-query.asciidoc +++ b/docs/reference/query-dsl/terms-set-query.asciidoc @@ -68,20 +68,20 @@ Response: "took": 13, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "skipped" : 0, "failed": 0 }, "hits": { "total": 1, - "max_score": 0.5753642, + "max_score": 0.87546873, "hits": [ { "_index": "my-index", "_type": "_doc", "_id": "2", - "_score": 0.5753642, + "_score": 0.87546873, "_source": { "codes": ["def", "ghi"], "required_matches": 2 diff --git a/docs/reference/release-notes/highlights-7.0.0.asciidoc b/docs/reference/release-notes/highlights-7.0.0.asciidoc new file mode 100644 index 0000000000000..1ea3d3fa3291e --- /dev/null +++ b/docs/reference/release-notes/highlights-7.0.0.asciidoc @@ -0,0 +1,9 @@ +[[release-highlights-7.0.0]] +== 7.0.0 release highlights +++++ +7.0.0 +++++ + +coming[7.0.0] + +See also <> and <>. diff --git a/docs/reference/release-notes/highlights.asciidoc b/docs/reference/release-notes/highlights.asciidoc new file mode 100644 index 0000000000000..1223e9a685a27 --- /dev/null +++ b/docs/reference/release-notes/highlights.asciidoc @@ -0,0 +1,13 @@ +[[release-highlights]] += {es} Release Highlights + +[partintro] +-- +This section summarizes the most important changes in each release. For the +full list, see <> and <>. + +* <> + +-- + +include::highlights-7.0.0.asciidoc[] \ No newline at end of file diff --git a/docs/reference/search/count.asciidoc b/docs/reference/search/count.asciidoc index f1c6cf7c573f9..5c01fa53d45ec 100644 --- a/docs/reference/search/count.asciidoc +++ b/docs/reference/search/count.asciidoc @@ -37,8 +37,8 @@ tweets from the `twitter` index for a certain user. 
The result is: { "count" : 1, "_shards" : { - "total" : 5, - "successful" : 5, + "total" : 1, + "successful" : 1, "skipped" : 0, "failed" : 0 } diff --git a/docs/reference/search/search-shards.asciidoc b/docs/reference/search/search-shards.asciidoc index 1a7c45545769a..90ee35afa6172 100644 --- a/docs/reference/search/search-shards.asciidoc +++ b/docs/reference/search/search-shards.asciidoc @@ -18,7 +18,7 @@ Full example: GET /twitter/_search_shards -------------------------------------------------- // CONSOLE -// TEST[s/^/PUT twitter\n/] +// TEST[s/^/PUT twitter\n{"settings":{"index.number_of_shards":5}}\n/] This will yield the following result: @@ -103,7 +103,7 @@ And specifying the same request, this time with a routing value: GET /twitter/_search_shards?routing=foo,bar -------------------------------------------------- // CONSOLE -// TEST[s/^/PUT twitter\n/] +// TEST[s/^/PUT twitter\n{"settings":{"index.number_of_shards":5}}\n/] This will yield the following result: diff --git a/docs/reference/search/suggesters/completion-suggest.asciidoc b/docs/reference/search/suggesters/completion-suggest.asciidoc index e3101a5dfb438..9f9833bde9d5c 100644 --- a/docs/reference/search/suggesters/completion-suggest.asciidoc +++ b/docs/reference/search/suggesters/completion-suggest.asciidoc @@ -177,8 +177,8 @@ returns this response: -------------------------------------------------- { "_shards" : { - "total" : 5, - "successful" : 5, + "total" : 1, + "successful" : 1, "skipped" : 0, "failed" : 0 }, @@ -251,8 +251,8 @@ Which should look like: "took": 6, "timed_out": false, "_shards" : { - "total" : 5, - "successful" : 5, + "total" : 1, + "successful" : 1, "skipped" : 0, "failed" : 0 }, diff --git a/docs/reference/search/validate.asciidoc b/docs/reference/search/validate.asciidoc index 2c0c8821355a7..20894e5773a37 100644 --- a/docs/reference/search/validate.asciidoc +++ b/docs/reference/search/validate.asciidoc @@ -218,8 +218,8 @@ Response: { "valid": true, "_shards": { - "total": 5, - "successful": 5, + "total": 1, + "successful": 1, "failed": 0 }, "explanations": [ @@ -227,31 +227,7 @@ Response: "index": "twitter", "shard": 0, "valid": true, - "explanation": "user:kimchy~2" - }, - { - "index": "twitter", - "shard": 1, - "valid": true, - "explanation": "user:kimchy~2" - }, - { - "index": "twitter", - "shard": 2, - "valid": true, - "explanation": "user:kimchy~2" - }, - { - "index": "twitter", - "shard": 3, - "valid": true, - "explanation": "(user:kimchi)^0.8333333" - }, - { - "index": "twitter", - "shard": 4, - "valid": true, - "explanation": "user:kimchy" + "explanation": "(user:kimchi)^0.8333333 user:kimchy" } ] } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalSpec.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalSpec.java index 8e0828fcfcaea..22875139c9beb 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalSpec.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalSpec.java @@ -57,7 +57,7 @@ public class RankEvalSpec implements Writeable, ToXContentObject { /** Default max number of requests. 
*/ private static final int MAX_CONCURRENT_SEARCHES = 10; /** optional: Templates to base test requests on */ - private Map templates = new HashMap<>(); + private final Map templates = new HashMap<>(); public RankEvalSpec(List ratedRequests, EvaluationMetric metric, Collection templates) { this.metric = Objects.requireNonNull(metric, "Cannot evaluate ranking if no evaluation metric is provided."); @@ -68,8 +68,8 @@ public RankEvalSpec(List ratedRequests, EvaluationMetric metric, C this.ratedRequests = ratedRequests; if (templates == null || templates.isEmpty()) { for (RatedRequest request : ratedRequests) { - if (request.getTestRequest() == null) { - throw new IllegalStateException("Cannot evaluate ranking if neither template nor test request is " + if (request.getEvaluationRequest() == null) { + throw new IllegalStateException("Cannot evaluate ranking if neither template nor evaluation request is " + "provided. Seen for request id: " + request.getId()); } }
diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java index 392ce5d0633a0..79dd693b3ac3c 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RatedRequest.java @@ -75,9 +75,12 @@ public class RatedRequest implements Writeable, ToXContentObject { private final String id; private final List summaryFields; private final List ratedDocs; - // Search request to execute for this rated request. This can be null if template and corresponding parameters are supplied. + /** + * Search request to execute for this rated request. This can be null when + * the query is supplied as a template with corresponding parameters + */ @Nullable - private SearchSourceBuilder testRequest; + private final SearchSourceBuilder evaluationRequest; /** * Map of parameters to use for filling a query template, can be used * instead of providing testRequest. */ @@ -86,27 +89,49 @@ public class RatedRequest implements Writeable, ToXContentObject { @Nullable private String templateId; - private RatedRequest(String id, List ratedDocs, SearchSourceBuilder testRequest, + /** + * Create a rated request with template ids and parameters. + * + * @param id a unique name for this rated request + * @param ratedDocs a list of document ratings + * @param params template parameters + * @param templateId a template id + */ + public RatedRequest(String id, List ratedDocs, Map params, + String templateId) { + this(id, ratedDocs, null, params, templateId); + } + + /** + * Create a rated request using a {@link SearchSourceBuilder} to define the + * evaluated query.
+ * + * @param id a unique name for this rated request + * @param ratedDocs a list of document ratings + * @param evaluatedQuery the query that is evaluated + */ + public RatedRequest(String id, List ratedDocs, SearchSourceBuilder evaluatedQuery) { + this(id, ratedDocs, evaluatedQuery, new HashMap<>(), null); + } + + private RatedRequest(String id, List ratedDocs, SearchSourceBuilder evaluatedQuery, Map params, String templateId) { - if (params != null && (params.size() > 0 && testRequest != null)) { + if (params != null && (params.size() > 0 && evaluatedQuery != null)) { throw new IllegalArgumentException( - "Ambiguous rated request: Set both, verbatim test request and test request " - + "template parameters."); + "Ambiguous rated request: Set both, verbatim test request and test request " + "template parameters."); } - if (templateId != null && testRequest != null) { + if (templateId != null && evaluatedQuery != null) { throw new IllegalArgumentException( - "Ambiguous rated request: Set both, verbatim test request and test request " - + "template parameters."); + "Ambiguous rated request: Set both, verbatim test request and test request " + "template parameters."); } - if ((params == null || params.size() < 1) && testRequest == null) { - throw new IllegalArgumentException( - "Need to set at least test request or test request template parameters."); + if ((params == null || params.size() < 1) && evaluatedQuery == null) { + throw new IllegalArgumentException("Need to set at least test request or test request template parameters."); } if ((params != null && params.size() > 0) && templateId == null) { - throw new IllegalArgumentException( - "If template parameters are supplied need to set id of template to apply " - + "them to too."); + throw new IllegalArgumentException("If template parameters are supplied need to set id of template to apply " + "them to too."); } + validateEvaluatedQuery(evaluatedQuery); + // check that not two documents with same _index/id are specified Set docKeys = new HashSet<>(); for (RatedDocument doc : ratedDocs) { @@ -118,7 +143,7 @@ private RatedRequest(String id, List ratedDocs, SearchSourceBuild } this.id = id; - this.testRequest = testRequest; + this.evaluationRequest = evaluatedQuery; this.ratedDocs = new ArrayList<>(ratedDocs); if (params != null) { this.params = new HashMap<>(params); @@ -129,18 +154,30 @@ private RatedRequest(String id, List ratedDocs, SearchSourceBuild this.summaryFields = new ArrayList<>(); } - public RatedRequest(String id, List ratedDocs, Map params, - String templateId) { - this(id, ratedDocs, null, params, templateId); - } - - public RatedRequest(String id, List ratedDocs, SearchSourceBuilder testRequest) { - this(id, ratedDocs, testRequest, new HashMap<>(), null); + static void validateEvaluatedQuery(SearchSourceBuilder evaluationRequest) { + // ensure that the evaluation request, if set, does not contain aggregation, suggest or highlighting section + if (evaluationRequest != null) { + if (evaluationRequest.suggest() != null) { + throw new IllegalArgumentException("Query in rated requests should not contain a suggest section."); + } + if (evaluationRequest.aggregations() != null) { + throw new IllegalArgumentException("Query in rated requests should not contain aggregations."); + } + if (evaluationRequest.highlighter() != null) { + throw new IllegalArgumentException("Query in rated requests should not contain a highlighter section."); + } + if (evaluationRequest.explain() != null && evaluationRequest.explain()) { + throw new
IllegalArgumentException("Query in rated requests should not use explain."); + } + if (evaluationRequest.profile()) { + throw new IllegalArgumentException("Query in rated requests should not use profile."); + } + } } - public RatedRequest(StreamInput in) throws IOException { + RatedRequest(StreamInput in) throws IOException { this.id = in.readString(); - testRequest = in.readOptionalWriteable(SearchSourceBuilder::new); + evaluationRequest = in.readOptionalWriteable(SearchSourceBuilder::new); int intentSize = in.readInt(); ratedDocs = new ArrayList<>(intentSize); @@ -159,7 +196,7 @@ public RatedRequest(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(id); - out.writeOptionalWriteable(testRequest); + out.writeOptionalWriteable(evaluationRequest); out.writeInt(ratedDocs.size()); for (RatedDocument ratedDoc : ratedDocs) { @@ -173,8 +210,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(this.templateId); } - public SearchSourceBuilder getTestRequest() { - return testRequest; + public SearchSourceBuilder getEvaluationRequest() { + return evaluationRequest; } /** return the user supplied request id */ @@ -240,8 +277,8 @@ public static RatedRequest fromXContent(XContentParser parser) { public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); builder.field(ID_FIELD.getPreferredName(), this.id); - if (testRequest != null) { - builder.field(REQUEST_FIELD.getPreferredName(), this.testRequest); + if (evaluationRequest != null) { + builder.field(REQUEST_FIELD.getPreferredName(), this.evaluationRequest); } builder.startArray(RATINGS_FIELD.getPreferredName()); for (RatedDocument doc : this.ratedDocs) { @@ -285,7 +322,7 @@ public final boolean equals(Object obj) { RatedRequest other = (RatedRequest) obj; - return Objects.equals(id, other.id) && Objects.equals(testRequest, other.testRequest) + return Objects.equals(id, other.id) && Objects.equals(evaluationRequest, other.evaluationRequest) && Objects.equals(summaryFields, other.summaryFields) && Objects.equals(ratedDocs, other.ratedDocs) && Objects.equals(params, other.params) @@ -294,7 +331,7 @@ public final boolean equals(Object obj) { @Override public final int hashCode() { - return Objects.hash(id, testRequest, summaryFields, ratedDocs, params, + return Objects.hash(id, evaluationRequest, summaryFields, ratedDocs, params, templateId); } } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java index 019ae274466ab..e0a0b3ea13378 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/TransportRankEvalAction.java @@ -52,6 +52,7 @@ import java.util.concurrent.ConcurrentHashMap; import static org.elasticsearch.common.xcontent.XContentHelper.createParser; +import static org.elasticsearch.index.rankeval.RatedRequest.validateEvaluatedQuery; /** * Instances of this class execute a collection of search intents (read: user @@ -99,15 +100,17 @@ protected void doExecute(RankEvalRequest request, ActionListener ratedRequestsInSearch = new ArrayList<>(); for (RatedRequest ratedRequest : ratedRequests) { - SearchSourceBuilder ratedSearchSource = ratedRequest.getTestRequest(); - if (ratedSearchSource == null) { + SearchSourceBuilder 
evaluationRequest = ratedRequest.getEvaluationRequest(); + if (evaluationRequest == null) { Map params = ratedRequest.getParams(); String templateId = ratedRequest.getTemplateId(); TemplateScript.Factory templateScript = scriptsWithoutParams.get(templateId); String resolvedRequest = templateScript.newInstance(params).execute(); try (XContentParser subParser = createParser(namedXContentRegistry, LoggingDeprecationHandler.INSTANCE, new BytesArray(resolvedRequest), XContentType.JSON)) { - ratedSearchSource = SearchSourceBuilder.fromXContent(subParser, false); + evaluationRequest = SearchSourceBuilder.fromXContent(subParser, false); + // check for parts that should not be part of a ranking evaluation request + validateEvaluatedQuery(evaluationRequest); } catch (IOException e) { // if we fail parsing, put the exception into the errors map and continue errors.put(ratedRequest.getId(), e); @@ -116,17 +119,17 @@ LoggingDeprecationHandler.INSTANCE, new BytesArray(resolvedRequest), XContentTyp } if (metric.forcedSearchSize().isPresent()) { - ratedSearchSource.size(metric.forcedSearchSize().get()); + evaluationRequest.size(metric.forcedSearchSize().get()); } ratedRequestsInSearch.add(ratedRequest); List summaryFields = ratedRequest.getSummaryFields(); if (summaryFields.isEmpty()) { - ratedSearchSource.fetchSource(false); + evaluationRequest.fetchSource(false); } else { - ratedSearchSource.fetchSource(summaryFields.toArray(new String[summaryFields.size()]), new String[0]); + evaluationRequest.fetchSource(summaryFields.toArray(new String[summaryFields.size()]), new String[0]); } - SearchRequest searchRequest = new SearchRequest(request.indices(), ratedSearchSource); + SearchRequest searchRequest = new SearchRequest(request.indices(), evaluationRequest); searchRequest.indicesOptions(request.indicesOptions()); msearchRequest.add(searchRequest); } diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java index 196b50b7f6163..084f29b8c9a87 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java @@ -33,7 +33,11 @@ import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder; +import org.elasticsearch.search.suggest.SuggestBuilder; +import org.elasticsearch.search.suggest.SuggestBuilders; import org.elasticsearch.test.ESTestCase; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -165,7 +169,7 @@ public void testEqualsAndHash() throws IOException { private static RatedRequest mutateTestItem(RatedRequest original) { String id = original.getId(); - SearchSourceBuilder testRequest = original.getTestRequest(); + SearchSourceBuilder evaluationRequest = original.getEvaluationRequest(); List ratedDocs = original.getRatedDocs(); Map params = original.getParams(); List summaryFields = original.getSummaryFields(); @@ -177,11 +181,11 @@ private static RatedRequest mutateTestItem(RatedRequest original) { id = randomValueOtherThan(id, () -> randomAlphaOfLength(10)); break; case 1: - if (testRequest != null) { - int size = 
randomValueOtherThan(testRequest.size(), () -> randomInt(Integer.MAX_VALUE)); - testRequest = new SearchSourceBuilder(); - testRequest.size(size); - testRequest.query(new MatchAllQueryBuilder()); + if (evaluationRequest != null) { + int size = randomValueOtherThan(evaluationRequest.size(), () -> randomInt(Integer.MAX_VALUE)); + evaluationRequest = new SearchSourceBuilder(); + evaluationRequest.size(size); + evaluationRequest.query(new MatchAllQueryBuilder()); } else { if (randomBoolean()) { Map mutated = new HashMap<>(); @@ -204,10 +208,10 @@ private static RatedRequest mutateTestItem(RatedRequest original) { } RatedRequest ratedRequest; - if (testRequest == null) { + if (evaluationRequest == null) { ratedRequest = new RatedRequest(id, ratedDocs, params, templateId); } else { - ratedRequest = new RatedRequest(id, ratedDocs, testRequest); + ratedRequest = new RatedRequest(id, ratedDocs, evaluationRequest); } ratedRequest.addSummaryFields(summaryFields); @@ -258,6 +262,44 @@ public void testSettingTemplateIdNoParamsThrows() { expectThrows(IllegalArgumentException.class, () -> new RatedRequest("id", ratedDocs, null, "templateId")); } + public void testAggsNotAllowed() { + List ratedDocs = Arrays.asList(new RatedDocument("index1", "id1", 1)); + SearchSourceBuilder query = new SearchSourceBuilder(); + query.aggregation(AggregationBuilders.terms("fieldName")); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new RatedRequest("id", ratedDocs, query)); + assertEquals("Query in rated requests should not contain aggregations.", e.getMessage()); + } + + public void testSuggestionsNotAllowed() { + List ratedDocs = Arrays.asList(new RatedDocument("index1", "id1", 1)); + SearchSourceBuilder query = new SearchSourceBuilder(); + query.suggest(new SuggestBuilder().addSuggestion("id", SuggestBuilders.completionSuggestion("fieldname"))); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new RatedRequest("id", ratedDocs, query)); + assertEquals("Query in rated requests should not contain a suggest section.", e.getMessage()); + } + + public void testHighlighterNotAllowed() { + List ratedDocs = Arrays.asList(new RatedDocument("index1", "id1", 1)); + SearchSourceBuilder query = new SearchSourceBuilder(); + query.highlighter(new HighlightBuilder().field("field")); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new RatedRequest("id", ratedDocs, query)); + assertEquals("Query in rated requests should not contain a highlighter section.", e.getMessage()); + } + + public void testExplainNotAllowed() { + List ratedDocs = Arrays.asList(new RatedDocument("index1", "id1", 1)); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> new RatedRequest("id", ratedDocs, new SearchSourceBuilder().explain(true))); + assertEquals("Query in rated requests should not use explain.", e.getMessage()); + } + + public void testProfileNotAllowed() { + List ratedDocs = Arrays.asList(new RatedDocument("index1", "id1", 1)); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> new RatedRequest("id", ratedDocs, new SearchSourceBuilder().profile(true))); + assertEquals("Query in rated requests should not use profile.", e.getMessage()); + } + /** * test that modifying the order of index/docId to make sure it doesn't * matter for parsing xContent @@ -287,7 +329,7 @@ public void testParseFromXContent() throws IOException { try (XContentParser parser = createParser(JsonXContent.jsonXContent, 
querySpecString)) { RatedRequest specification = RatedRequest.fromXContent(parser); assertEquals("my_qa_query", specification.getId()); - assertNotNull(specification.getTestRequest()); + assertNotNull(specification.getEvaluationRequest()); List ratedDocs = specification.getRatedDocs(); assertEquals(3, ratedDocs.size()); for (int i = 0; i < 3; i++) { diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/35_search_failures.yml b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/35_search_failures.yml index 605891d2b32d3..70e78f7e36b37 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/35_search_failures.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/35_search_failures.yml @@ -1,5 +1,12 @@ --- "Response format for search failures": + - do: + indices.create: + index: source + body: + settings: + index.number_of_shards: 2 + - do: index: index: source @@ -26,7 +33,7 @@ - match: {updated: 0} - match: {version_conflicts: 0} - match: {batches: 0} - - is_true: failures.0.shard + - match: {failures.0.shard: 0} - match: {failures.0.index: source} - is_true: failures.0.node - match: {failures.0.reason.type: script_exception} diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/35_search_failure.yml b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/35_search_failure.yml index 8ace77eee59eb..17f422453ce18 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/35_search_failure.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/35_search_failure.yml @@ -1,5 +1,12 @@ --- "Response format for search failures": + - do: + indices.create: + index: source + body: + settings: + index.number_of_shards: 2 + - do: index: index: source @@ -22,7 +29,7 @@ - match: {updated: 0} - match: {version_conflicts: 0} - match: {batches: 0} - - is_true: failures.0.shard + - match: {failures.0.shard: 0} - match: {failures.0.index: source} - is_true: failures.0.node - match: {failures.0.reason.type: script_exception} diff --git a/modules/repository-url/build.gradle b/modules/repository-url/build.gradle index 79fe5e7aaefa7..62aad486ad804 100644 --- a/modules/repository-url/build.gradle +++ b/modules/repository-url/build.gradle @@ -34,6 +34,7 @@ File repositoryDir = new File(project.buildDir, "shared-repository") /** A task to start the URLFixture which exposes the repositoryDir over HTTP **/ task urlFixture(type: AntFixture) { + dependsOn testClasses doFirst { repositoryDir.mkdirs() } diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml index 7726a1df0b10d..8617ecc1fe28a 100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml @@ -161,7 +161,7 @@ search: index: my_remote_cluster:aliased_test_index,my_remote_cluster:field_caps_index_1 - - match: { _shards.total: 8 } + - match: { _shards.total: 4 } - match: { hits.total: 2 } - match: { hits.hits.0._source.filter_field: 1 } - match: { hits.hits.0._index: "my_remote_cluster:test_index" } diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/remote_cluster/10_basic.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/remote_cluster/10_basic.yml index d37bb5a182586..c2840c1ce98e8 100644 --- 
a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/remote_cluster/10_basic.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/remote_cluster/10_basic.yml @@ -27,6 +27,8 @@ indices.create: index: field_caps_index_1 body: + settings: + index.number_of_shards: 1 mappings: t: properties: @@ -51,6 +53,8 @@ indices.create: index: field_caps_index_3 body: + settings: + index.number_of_shards: 1 mappings: t: properties: diff --git a/qa/smoke-test-rank-eval-with-mustache/src/test/java/org/elasticsearch/index/rankeval/SmokeMultipleTemplatesIT.java b/qa/smoke-test-rank-eval-with-mustache/src/test/java/org/elasticsearch/index/rankeval/SmokeMultipleTemplatesIT.java index 50860ddd87b21..0ad78ad0c7a7e 100644 --- a/qa/smoke-test-rank-eval-with-mustache/src/test/java/org/elasticsearch/index/rankeval/SmokeMultipleTemplatesIT.java +++ b/qa/smoke-test-rank-eval-with-mustache/src/test/java/org/elasticsearch/index/rankeval/SmokeMultipleTemplatesIT.java @@ -30,6 +30,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -106,6 +107,43 @@ public void testPrecisionAtRequest() throws IOException { assertEquals(0.9, response.getEvaluationResult(), Double.MIN_VALUE); } + public void testTemplateWithAggsFails() { + String template = "{ \"aggs\" : { \"avg_grade\" : { \"avg\" : { \"field\" : \"grade\" }}}}"; + assertTemplatedRequestFailures(template, "Query in rated requests should not contain aggregations."); + } + + public void testTemplateWithSuggestFails() { + String template = "{\"suggest\" : {\"my-suggestion\" : {\"text\" : \"Elastic\",\"term\" : {\"field\" : \"message\"}}}}"; + assertTemplatedRequestFailures(template, "Query in rated requests should not contain a suggest section."); + } + + public void testTemplateWithHighlighterFails() { + String template = "{\"highlight\" : { \"fields\" : {\"content\" : {}}}}"; + assertTemplatedRequestFailures(template, "Query in rated requests should not contain a highlighter section."); + } + + public void testTemplateWithProfileFails() { + String template = "{\"profile\" : \"true\" }"; + assertTemplatedRequestFailures(template, "Query in rated requests should not use profile."); + } + + public void testTemplateWithExplainFails() { + String template = "{\"explain\" : \"true\" }"; + assertTemplatedRequestFailures(template, "Query in rated requests should not use explain."); + } + + private static void assertTemplatedRequestFailures(String template, String expectedMessage) { + List ratedDocs = Arrays.asList(new RatedDocument("index1", "id1", 1)); + RatedRequest ratedRequest = new RatedRequest("id", ratedDocs, Collections.singletonMap("param1", "value1"), "templateId"); + Collection templates = Collections.singletonList(new ScriptWithId("templateId", + new Script(ScriptType.INLINE, Script.DEFAULT_TEMPLATE_LANG, template, Collections.emptyMap()))); + RankEvalSpec rankEvalSpec = new RankEvalSpec(Collections.singletonList(ratedRequest), new PrecisionAtK(), templates); + RankEvalRequest rankEvalRequest = new RankEvalRequest(rankEvalSpec, new String[] { "test" }); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> client().execute(RankEvalAction.INSTANCE, rankEvalRequest).actionGet()); + assertEquals(expectedMessage, e.getMessage()); + } + private static List createRelevant(String... 
docs) { List relevant = new ArrayList<>(); for (String doc : docs) { diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yml index 403b0b740c533..78b7a4277570a 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cat.templates/10_basic.yml @@ -14,6 +14,8 @@ --- "No templates": + - skip: + features: default_shards - do: cat.templates: {} @@ -174,6 +176,8 @@ --- "Sort templates": + - skip: + features: default_shards - do: indices.put_template: name: test @@ -222,6 +226,8 @@ --- "Multiple template": + - skip: + features: default_shards - do: indices.put_template: name: test_1 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml index 4d98eade8f709..f94cf286fd898 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml @@ -1,8 +1,8 @@ --- "Shrink index via API": - skip: - version: " - 6.99.99" - reason: expects warnings that pre-7.0.0 will not send + version: " - 6.3.99" + reason: expects warnings that pre-6.4.0 will not send features: "warnings" # creates an index with one document solely allocated on the master node # and shrinks it into a new index with a single shard @@ -24,7 +24,8 @@ settings: # ensure everything is allocated on a single node index.routing.allocation.include._id: $master - number_of_replicas: 0 + index.number_of_shards: 2 + index.number_of_replicas: 0 - do: index: index: source diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml index 07b3515b50c9e..6f532ff81c688 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml @@ -1,8 +1,8 @@ --- "Shrink index ignores target template mapping": - skip: - version: " - 6.99.99" - reason: expects warnings that pre-7.0.0 will not send + version: " - 6.3.99" + reason: expects warnings that pre-6.4.0 will not send features: "warnings" - do: @@ -20,7 +20,8 @@ settings: # ensure everything is allocated on a single node index.routing.allocation.include._id: $master - number_of_replicas: 0 + index.number_of_shards: 2 + index.number_of_replicas: 0 mappings: test: properties: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml index 6e595921d7f6e..53a12aad787f7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml @@ -1,8 +1,8 @@ --- "Copy settings during shrink index": - skip: - version: " - 6.99.99" - reason: expects warnings that pre-7.0.0 will not send + version: " - 6.3.99" + reason: expects warnings that pre-6.4.0 will not send features: "warnings" - do: @@ -19,6 +19,7 @@ settings: # ensure everything is allocated on the master node index.routing.allocation.include._id: $master + index.number_of_shards: 2 index.number_of_replicas: 0 
index.merge.scheduler.max_merge_count: 4 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml index 635673c182f2f..4f645d3eb3e0b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml @@ -33,8 +33,8 @@ setup: --- "Split index via API": - skip: - version: " - 6.99.99" - reason: expects warnings that pre-7.0.0 will not send + version: " - 6.3.99" + reason: expects warnings that pre-6.4.0 will not send features: "warnings" # make it read-only @@ -110,8 +110,8 @@ setup: # when re-enabling uncomment the below skips version: "all" reason: "AwaitsFix'ing, see https://github.com/elastic/elasticsearch/issues/30503" - # version: " - 6.99.99" - # reason: expects warnings that pre-7.0.0 will not send + # version: " - 6.3.99" + # reason: expects warnings that pre-6.4.0 will not send features: "warnings" - do: indices.create: @@ -213,8 +213,8 @@ setup: --- "Create illegal split indices": - skip: - version: " - 6.99.99" - reason: expects warnings that pre-7.0.0 will not send + version: " - 6.3.99" + reason: expects warnings that pre-6.4.0 will not send features: "warnings" # try to do an illegal split with number_of_routing_shards set diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml index 433ac040dd1e4..4bac4bf5b0807 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml @@ -4,8 +4,8 @@ # when re-enabling uncomment the below skips version: "all" reason: "AwaitsFix'ing, see https://github.com/elastic/elasticsearch/issues/30503" - # version: " - 6.99.99" - # reason: expects warnings that pre-7.0.0 will not send + # version: " - 6.3.99" + # reason: expects warnings that pre-6.4.0 will not send features: "warnings" # create index diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml index e0ace991f4f0d..9e64b2b8130ad 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml @@ -1,8 +1,8 @@ --- "Copy settings during split index": - skip: - version: " - 6.99.99" - reason: expects warnings that pre-7.0.0 will not send + version: " - 6.3.99" + reason: expects warnings that pre-6.4.0 will not send features: "warnings" - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/240_max_buckets.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/240_max_buckets.yml index 86c6c632d5e94..f8d960e0c2597 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/240_max_buckets.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/240_max_buckets.yml @@ -17,14 +17,14 @@ setup: index: test type: doc id: 1 - body: { "date": "2014-03-03T00:00:00", "keyword": "foo" } + body: { "date": "2014-03-03T00:00:00", "keyword": "dgx" } - do: index: index: test type: doc id: 2 - body: { "date": "2015-03-03T00:00:00", "keyword": "bar" } + body: { 
"date": "2015-03-03T00:00:00", "keyword": "dfs" } - do: index: @@ -38,7 +38,36 @@ setup: index: test type: doc id: 4 - body: { "date": "2017-03-03T00:00:00" } + body: { "date": "2017-03-03T00:00:00", "keyword": "foo" } + + - do: + index: + index: test + type: doc + id: 5 + body: { "date": "2018-03-03T00:00:00", "keyword": "bar" } + + - do: + index: + index: test + type: doc + id: 6 + body: { "date": "2019-03-03T00:00:00", "keyword": "baz" } + + - do: + index: + index: test + type: doc + id: 7 + body: { "date": "2020-03-03T00:00:00", "keyword": "qux" } + + - do: + index: + index: test + type: doc + id: 8 + body: { "date": "2021-03-03T00:00:00", "keyword": "quux" } + - do: indices.refresh: diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 42ff432240381..fa4d751a54aed 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -253,7 +253,6 @@ import org.elasticsearch.rest.action.admin.indices.RestGetAliasesAction; import org.elasticsearch.rest.action.admin.indices.RestGetAllAliasesAction; import org.elasticsearch.rest.action.admin.indices.RestGetAllMappingsAction; -import org.elasticsearch.rest.action.admin.indices.RestGetAllSettingsAction; import org.elasticsearch.rest.action.admin.indices.RestGetFieldMappingAction; import org.elasticsearch.rest.action.admin.indices.RestGetIndexTemplateAction; import org.elasticsearch.rest.action.admin.indices.RestGetIndicesAction; @@ -558,7 +557,6 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestGetAllAliasesAction(settings, restController)); registerHandler.accept(new RestGetAllMappingsAction(settings, restController)); - registerHandler.accept(new RestGetAllSettingsAction(settings, restController, indexScopedSettings, settingsFilter)); registerHandler.accept(new RestGetIndicesAction(settings, restController, indexScopedSettings, settingsFilter)); registerHandler.accept(new RestIndicesStatsAction(settings, restController)); registerHandler.accept(new RestIndicesSegmentsAction(settings, restController)); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java index e510c0719df2d..ca046c48accff 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java @@ -101,8 +101,6 @@ public void readFrom(StreamInput in) throws IOException { } if (in.getVersion().before(Version.V_6_4_0)) { copySettings = null; - } else if (in.getVersion().onOrAfter(Version.V_6_4_0) && in.getVersion().before(Version.V_7_0_0_alpha1)){ - copySettings = in.readBoolean(); } else { copySettings = in.readOptionalBoolean(); } @@ -116,10 +114,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(ResizeAction.COMPATIBILITY_VERSION)) { out.writeEnum(type); } + // noinspection StatementWithEmptyBody if (out.getVersion().before(Version.V_6_4_0)) { - } else if (out.getVersion().onOrAfter(Version.V_6_4_0) && out.getVersion().before(Version.V_7_0_0_alpha1)) { - out.writeBoolean(copySettings == null ? 
false : copySettings); } else { out.writeOptionalBoolean(copySettings); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 166aad3ecaa12..0d8a374e66d42 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -366,8 +366,14 @@ public ClusterState execute(ClusterState currentState) throws Exception { } // now, put the request settings, so they override templates indexSettingsBuilder.put(request.settings()); + if (indexSettingsBuilder.get(SETTING_VERSION_CREATED) == null) { + DiscoveryNodes nodes = currentState.nodes(); + final Version createdVersion = Version.min(Version.CURRENT, nodes.getSmallestNonClientNodeVersion()); + indexSettingsBuilder.put(SETTING_VERSION_CREATED, createdVersion); + } if (indexSettingsBuilder.get(SETTING_NUMBER_OF_SHARDS) == null) { - indexSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, settings.getAsInt(SETTING_NUMBER_OF_SHARDS, 5)); + final int numberOfShards = getNumberOfShards(indexSettingsBuilder); + indexSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, settings.getAsInt(SETTING_NUMBER_OF_SHARDS, numberOfShards)); } if (indexSettingsBuilder.get(SETTING_NUMBER_OF_REPLICAS) == null) { indexSettingsBuilder.put(SETTING_NUMBER_OF_REPLICAS, settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, 1)); @@ -376,12 +382,6 @@ public ClusterState execute(ClusterState currentState) throws Exception { indexSettingsBuilder.put(SETTING_AUTO_EXPAND_REPLICAS, settings.get(SETTING_AUTO_EXPAND_REPLICAS)); } - if (indexSettingsBuilder.get(SETTING_VERSION_CREATED) == null) { - DiscoveryNodes nodes = currentState.nodes(); - final Version createdVersion = Version.min(Version.CURRENT, nodes.getSmallestNonClientNodeVersion()); - indexSettingsBuilder.put(SETTING_VERSION_CREATED, createdVersion); - } - if (indexSettingsBuilder.get(SETTING_CREATION_DATE) == null) { indexSettingsBuilder.put(SETTING_CREATION_DATE, new DateTime(DateTimeZone.UTC).getMillis()); } @@ -573,6 +573,18 @@ public ClusterState execute(ClusterState currentState) throws Exception { } } + static int getNumberOfShards(final Settings.Builder indexSettingsBuilder) { + // TODO: this logic can be removed when the current major version is 8 + assert Version.CURRENT.major == 7; + final int numberOfShards; + if (Version.fromId(Integer.parseInt(indexSettingsBuilder.get(SETTING_VERSION_CREATED))).before(Version.V_7_0_0_alpha1)) { + numberOfShards = 5; + } else { + numberOfShards = 1; + } + return numberOfShards; + } + @Override public void onFailure(String source, Exception e) { if (e instanceof ResourceAlreadyExistsException) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index deb10b83b5a5d..569ddd6cee772 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -114,11 +114,24 @@ public ClusterState applyStartedShards(ClusterState clusterState, List> entries = new SetOnce<>(); + private volatile boolean closed; private KeyStoreWrapper(int formatVersion, boolean hasPassword, byte[] dataBytes) { this.formatVersion = formatVersion; @@ -448,8 +449,8 @@ private void 
decryptLegacyEntries() throws GeneralSecurityException, IOException } /** Write the keystore to the given config directory. */ - public void save(Path configDir, char[] password) throws Exception { - assert isLoaded(); + public synchronized void save(Path configDir, char[] password) throws Exception { + ensureOpen(); SimpleFSDirectory directory = new SimpleFSDirectory(configDir); // write to tmp file first, then overwrite @@ -500,16 +501,22 @@ public void save(Path configDir, char[] password) throws Exception { } } + /** + * It is possible to retrieve the setting names even if the keystore is closed. + * This allows {@link SecureSetting} to correctly determine that an entry exists even though it cannot be read. Thus attempting to + * read a secure setting after the keystore is closed will generate a "keystore is closed" exception rather than using the fallback + * setting. + */ @Override public Set<String> getSettingNames() { - assert isLoaded(); + assert entries.get() != null : "Keystore is not loaded"; return entries.get().keySet(); } // TODO: make settings accessible only to code that registered the setting @Override - public SecureString getString(String setting) { - assert isLoaded(); + public synchronized SecureString getString(String setting) { + ensureOpen(); Entry entry = entries.get().get(setting); if (entry == null || entry.type != EntryType.STRING) { throw new IllegalArgumentException("Secret setting " + setting + " is not a string"); @@ -520,13 +527,12 @@ public SecureString getString(String setting) { } @Override - public InputStream getFile(String setting) { - assert isLoaded(); + public synchronized InputStream getFile(String setting) { + ensureOpen(); Entry entry = entries.get().get(setting); if (entry == null || entry.type != EntryType.FILE) { throw new IllegalArgumentException("Secret setting " + setting + " is not a file"); } - return new ByteArrayInputStream(entry.bytes); } @@ -543,8 +549,8 @@ public static void validateSettingName(String setting) { } /** Set a string setting. */ - void setString(String setting, char[] value) { - assert isLoaded(); + synchronized void setString(String setting, char[] value) { + ensureOpen(); validateSettingName(setting); ByteBuffer byteBuffer = StandardCharsets.UTF_8.encode(CharBuffer.wrap(value)); @@ -556,8 +562,8 @@ void setString(String setting, char[] value) { } /** Set a file setting. */ - void setFile(String setting, byte[] bytes) { - assert isLoaded(); + synchronized void setFile(String setting, byte[] bytes) { + ensureOpen(); validateSettingName(setting); Entry oldEntry = entries.get().put(setting, new Entry(EntryType.FILE, Arrays.copyOf(bytes, bytes.length))); @@ -568,15 +574,23 @@ void setFile(String setting, byte[] bytes) { /** Remove the given setting from the keystore.
*/ void remove(String setting) { - assert isLoaded(); + ensureOpen(); Entry oldEntry = entries.get().remove(setting); if (oldEntry != null) { Arrays.fill(oldEntry.bytes, (byte)0); } } + private void ensureOpen() { + if (closed) { + throw new IllegalStateException("Keystore is closed"); + } + assert isLoaded() : "Keystore is not loaded"; + } + @Override - public void close() { + public synchronized void close() { + this.closed = true; for (Entry entry : entries.get().values()) { Arrays.fill(entry.bytes, (byte)0); } diff --git a/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java b/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java index e59fc8ad51385..5cceba237e544 100644 --- a/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java +++ b/server/src/main/java/org/elasticsearch/discovery/zen/NodeJoinController.java @@ -380,7 +380,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS /** * a task indicated that the current node should become master, if no current master is known */ - private static final DiscoveryNode BECOME_MASTER_TASK = new DiscoveryNode("_BECOME_MASTER_TASK_", + public static final DiscoveryNode BECOME_MASTER_TASK = new DiscoveryNode("_BECOME_MASTER_TASK_", new TransportAddress(TransportAddress.META_ADDRESS, 0), Collections.emptyMap(), Collections.emptySet(), Version.CURRENT) { @Override @@ -393,7 +393,7 @@ public String toString() { * a task that is used to signal the election is stopped and we should process pending joins. * it may be use in combination with {@link #BECOME_MASTER_TASK} */ - private static final DiscoveryNode FINISH_ELECTION_TASK = new DiscoveryNode("_FINISH_ELECTION_", + public static final DiscoveryNode FINISH_ELECTION_TASK = new DiscoveryNode("_FINISH_ELECTION_", new TransportAddress(TransportAddress.META_ADDRESS, 0), Collections.emptyMap(), Collections.emptySet(), Version.CURRENT) { @Override public String toString() { diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAllSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAllSettingsAction.java deleted file mode 100644 index f51cee37ad3f0..0000000000000 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetAllSettingsAction.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.rest.action.admin.indices; - -import org.elasticsearch.action.admin.indices.get.GetIndexRequest; -import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature; -import org.elasticsearch.action.admin.indices.get.GetIndexResponse; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.node.NodeClient; -import org.elasticsearch.cluster.metadata.AliasMetaData; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.IndexScopedSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsFilter; -import org.elasticsearch.common.xcontent.ToXContent.Params; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.rest.BaseRestHandler; -import org.elasticsearch.rest.BytesRestResponse; -import org.elasticsearch.rest.RestController; -import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.rest.RestResponse; -import org.elasticsearch.rest.action.RestBuilderListener; - -import java.io.IOException; -import java.util.List; -import java.util.Set; - -import static org.elasticsearch.rest.RestRequest.Method.GET; -import static org.elasticsearch.rest.RestRequest.Method.HEAD; -import static org.elasticsearch.rest.RestStatus.OK; - -/** - * The REST handler for retrieving all settings - */ -public class RestGetAllSettingsAction extends BaseRestHandler { - - private final IndexScopedSettings indexScopedSettings; - private final SettingsFilter settingsFilter; - - public RestGetAllSettingsAction(final Settings settings, final RestController controller, - final IndexScopedSettings indexScopedSettings, final SettingsFilter settingsFilter) { - super(settings); - this.indexScopedSettings = indexScopedSettings; - controller.registerHandler(GET, "/_settings", this); - this.settingsFilter = settingsFilter; - } - - @Override - public String getName() { - return "get_all_settings_action"; - } - - @Override - public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { - final GetIndexRequest getIndexRequest = new GetIndexRequest(); - getIndexRequest.indices(Strings.EMPTY_ARRAY); - getIndexRequest.features(Feature.SETTINGS); - getIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, getIndexRequest.indicesOptions())); - getIndexRequest.local(request.paramAsBoolean("local", getIndexRequest.local())); - getIndexRequest.humanReadable(request.paramAsBoolean("human", false)); - // This is required so the "flat_settings" parameter counts as consumed - request.paramAsBoolean("flat_settings", false); - final boolean defaults = request.paramAsBoolean("include_defaults", false); - return channel -> client.admin().indices().getIndex(getIndexRequest, new RestBuilderListener(channel) { - - @Override - public RestResponse buildResponse(final GetIndexResponse response, final XContentBuilder builder) throws Exception { - builder.startObject(); - { - for (final String index : response.indices()) { - builder.startObject(index); - { - writeSettings(response.settings().get(index), builder, request, defaults); - } - builder.endObject(); - } - } - builder.endObject(); - - return new BytesRestResponse(OK, builder); - } - - - private void writeSettings(final Settings settings, final XContentBuilder builder, - final Params params, final boolean defaults) throws IOException { - builder.startObject("settings"); - { - settings.toXContent(builder, params); - } - builder.endObject(); - if 
(defaults) { - builder.startObject("defaults"); - { - settingsFilter - .filter(indexScopedSettings.diff(settings, RestGetAllSettingsAction.this.settings)) - .toXContent(builder, request); - } - builder.endObject(); - } - } - }); - } - -} diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java index 9791994c773e2..d9fa50cf9410d 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/admin/indices/RestGetSettingsAction.java @@ -19,16 +19,12 @@ package org.elasticsearch.rest.action.admin.indices; -import com.carrotsearch.hppc.cursors.ObjectObjectCursor; - import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BytesRestResponse; @@ -46,6 +42,7 @@ public class RestGetSettingsAction extends BaseRestHandler { public RestGetSettingsAction(Settings settings, RestController controller) { super(settings); + controller.registerHandler(GET, "/_settings", this); controller.registerHandler(GET, "/_settings/{name}", this); controller.registerHandler(GET, "/{index}/_settings", this); controller.registerHandler(GET, "/{index}/_settings/{name}", this); diff --git a/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java b/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java index dca17ce486607..9505875ae1ebc 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java @@ -29,6 +29,8 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -46,6 +48,11 @@ */ public final class ScriptMetaData implements MetaData.Custom, Writeable, ToXContentFragment { + /** + * Standard deprecation logger, used to deprecate the allowance of empty templates. + */ + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(ScriptMetaData.class)); + /** * A builder used to modify the currently stored scripts data held within * the {@link ClusterState}.
Scripts can be added or deleted, then built @@ -161,8 +168,8 @@ static ScriptMetaData deleteStoredScript(ScriptMetaData previous, String id) { * * {@code * { - * "<id>" : "<{@link StoredScriptSource#fromXContent(XContentParser)}>", - * "<id>" : "<{@link StoredScriptSource#fromXContent(XContentParser)}>", + * "<id>" : "<{@link StoredScriptSource#fromXContent(XContentParser, boolean)}>", + * "<id>" : "<{@link StoredScriptSource#fromXContent(XContentParser, boolean)}>", * ... * } * } @@ -209,6 +216,14 @@ public static ScriptMetaData fromXContent(XContentParser parser) throws IOExcept lang = id.substring(0, split); id = id.substring(split + 1); source = new StoredScriptSource(lang, parser.text(), Collections.emptyMap()); + + if (source.getSource().isEmpty()) { + if (source.getLang().equals(Script.DEFAULT_TEMPLATE_LANG)) { + DEPRECATION_LOGGER.deprecated("empty templates should no longer be used"); + } else { + DEPRECATION_LOGGER.deprecated("empty scripts should no longer be used"); + } + } } exists = scripts.get(id); @@ -231,7 +246,7 @@ public static ScriptMetaData fromXContent(XContentParser parser) throws IOExcept } exists = scripts.get(id); - source = StoredScriptSource.fromXContent(parser); + source = StoredScriptSource.fromXContent(parser, true); if (exists == null) { scripts.put(id, source); diff --git a/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java b/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java index 9c52ff943d2a1..da6dad1dff384 100644 --- a/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java +++ b/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java @@ -32,6 +32,8 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ObjectParser; @@ -57,6 +59,11 @@ */ public class StoredScriptSource extends AbstractDiffable<StoredScriptSource> implements Writeable, ToXContentObject { + /** + * Standard deprecation logger, used to deprecate the allowance of empty templates. + */ + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(StoredScriptSource.class)); + /** * Standard {@link ParseField} for outer level of stored script source. */ @@ -109,7 +116,7 @@ private void setLang(String lang) { private void setSource(XContentParser parser) { try { if (parser.currentToken() == Token.START_OBJECT) { - //this is really for search templates, that need to be converted to json format + // this is really for search templates, that need to be converted to json format XContentBuilder builder = XContentFactory.jsonBuilder(); source = Strings.toString(builder.copyCurrentStructure(parser)); options.put(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType()); @@ -131,8 +138,12 @@ private void setOptions(Map<String, String> options) { /** * Validates the parameters and creates an {@link StoredScriptSource}. + * + * @param ignoreEmpty Specify as {@code true} to ignore the empty source check. + * This allows empty templates to be loaded for backwards compatibility.
*/ - private StoredScriptSource build() { + private StoredScriptSource build(boolean ignoreEmpty) { if (lang == null) { throw new IllegalArgumentException("must specify lang for stored script"); } else if (lang.isEmpty()) { @@ -140,9 +151,25 @@ private StoredScriptSource build() { } if (source == null) { - throw new IllegalArgumentException("must specify source for stored script"); + if (ignoreEmpty || Script.DEFAULT_TEMPLATE_LANG.equals(lang)) { + if (Script.DEFAULT_TEMPLATE_LANG.equals(lang)) { + DEPRECATION_LOGGER.deprecated("empty templates should no longer be used"); + } else { + DEPRECATION_LOGGER.deprecated("empty scripts should no longer be used"); + } + } else { + throw new IllegalArgumentException("must specify source for stored script"); + } } else if (source.isEmpty()) { - throw new IllegalArgumentException("source cannot be empty"); + if (ignoreEmpty || Script.DEFAULT_TEMPLATE_LANG.equals(lang)) { + if (Script.DEFAULT_TEMPLATE_LANG.equals(lang)) { + DEPRECATION_LOGGER.deprecated("empty templates should no longer be used"); + } else { + DEPRECATION_LOGGER.deprecated("empty scripts should no longer be used"); + } + } else { + throw new IllegalArgumentException("source cannot be empty"); + } } if (options.size() > 1 || options.size() == 1 && options.get(Script.CONTENT_TYPE_OPTION) == null) { @@ -257,6 +284,8 @@ public static StoredScriptSource parse(BytesReference content, XContentType xCon token = parser.nextToken(); if (token == Token.END_OBJECT) { + DEPRECATION_LOGGER.deprecated("empty templates should no longer be used"); + return new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, "", Collections.emptyMap()); } @@ -271,7 +300,7 @@ public static StoredScriptSource parse(BytesReference content, XContentType xCon token = parser.nextToken(); if (token == Token.START_OBJECT) { - return PARSER.apply(parser, null).build(); + return PARSER.apply(parser, null).build(false); } else { throw new ParsingException(parser.getTokenLocation(), "unexpected token [" + token + "], expected [{, <source>]"); } @@ -280,7 +309,13 @@ public static StoredScriptSource parse(BytesReference content, XContentType xCon token = parser.nextToken(); if (token == Token.VALUE_STRING) { - return new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, parser.text(), Collections.emptyMap()); + String source = parser.text(); + + if (source == null || source.isEmpty()) { + DEPRECATION_LOGGER.deprecated("empty templates should no longer be used"); + } + + return new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, source, Collections.emptyMap()); } } @@ -293,7 +328,13 @@ public static StoredScriptSource parse(BytesReference content, XContentType xCon builder.copyCurrentStructure(parser); } - return new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, Strings.toString(builder), Collections.emptyMap()); + String source = Strings.toString(builder); + + if (source == null || source.isEmpty()) { + DEPRECATION_LOGGER.deprecated("empty templates should no longer be used"); + } + + return new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, source, Collections.emptyMap()); } } } catch (IOException ioe) { @@ -320,9 +361,12 @@ public static StoredScriptSource parse(BytesReference content, XContentType xCon * * Note that the "source" parameter can also handle template parsing including from * a complex JSON object. + * + * @param ignoreEmpty Specify as {@code true} to ignore the empty source check. + * This allows empty templates to be loaded for backwards compatibility.
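+ * For example (a behavior sketch grounded in the empty-script tests elsewhere in this change): when {@code ignoreEmpty} is {@code true}, parsing a stored script whose {@code lang} is "mustache" and whose {@code source} is empty succeeds and emits the deprecation warning "empty templates should no longer be used" instead of throwing.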
*/ - public static StoredScriptSource fromXContent(XContentParser parser) { - return PARSER.apply(parser, null).build(); + public static StoredScriptSource fromXContent(XContentParser parser, boolean ignoreEmpty) { + return PARSER.apply(parser, null).build(ignoreEmpty); } /** diff --git a/server/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java b/server/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java index d887387d43fe9..ccdc1d6ab3323 100644 --- a/server/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/allocation/FilteringAllocationIT.java @@ -96,7 +96,7 @@ public void testDisablingAllocationFiltering() throws Exception { logger.info("--> creating an index with no replicas"); client().admin().indices().prepareCreate("test") - .setSettings(Settings.builder().put("index.number_of_replicas", 0)) + .setSettings(Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0)) .execute().actionGet(); ensureGreen(); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/AutoExpandReplicasTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/AutoExpandReplicasTests.java index 32312f34e2104..f24dbfbd002ca 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/AutoExpandReplicasTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/AutoExpandReplicasTests.java @@ -18,8 +18,36 @@ */ package org.elasticsearch.cluster.metadata; +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.indices.cluster.ClusterStateChanges; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Set; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; +import static org.hamcrest.Matchers.everyItem; +import static org.hamcrest.Matchers.isIn; public class AutoExpandReplicasTests extends ESTestCase { @@ -72,4 +100,104 @@ public void testInvalidValues() { } } + + private static final AtomicInteger nodeIdGenerator = new AtomicInteger(); + + protected DiscoveryNode createNode(DiscoveryNode.Role... 
mustHaveRoles) { + Set<DiscoveryNode.Role> roles = new HashSet<>(randomSubsetOf(Sets.newHashSet(DiscoveryNode.Role.values()))); + for (DiscoveryNode.Role mustHaveRole : mustHaveRoles) { + roles.add(mustHaveRole); + } + final String id = String.format(Locale.ROOT, "node_%03d", nodeIdGenerator.incrementAndGet()); + return new DiscoveryNode(id, id, buildNewFakeTransportAddress(), Collections.emptyMap(), roles, + Version.CURRENT); + } + + /** + * Checks that when nodes leave the cluster, the auto-expand-replica functionality only triggers after the shards on the removed + * nodes have been failed. This ensures that active shards on other live nodes are not failed if the primary resided on a now dead node. + * Instead, one of the replicas on the live nodes first gets promoted to primary, and the auto-expansion (removing replicas) only + * triggers in a follow-up step. + */ + public void testAutoExpandWhenNodeLeavesAndPossiblyRejoins() throws InterruptedException { + final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + final ClusterStateChanges cluster = new ClusterStateChanges(xContentRegistry(), threadPool); + + try { + List<DiscoveryNode> allNodes = new ArrayList<>(); + DiscoveryNode localNode = createNode(DiscoveryNode.Role.MASTER); // local node is the master + allNodes.add(localNode); + int numDataNodes = randomIntBetween(3, 5); + List<DiscoveryNode> dataNodes = new ArrayList<>(numDataNodes); + for (int i = 0; i < numDataNodes; i++) { + dataNodes.add(createNode(DiscoveryNode.Role.DATA)); + } + allNodes.addAll(dataNodes); + ClusterState state = ClusterStateCreationUtils.state(localNode, localNode, allNodes.toArray(new DiscoveryNode[allNodes.size()])); + + CreateIndexRequest request = new CreateIndexRequest("index", + Settings.builder() + .put(SETTING_NUMBER_OF_SHARDS, 1) + .put(SETTING_AUTO_EXPAND_REPLICAS, "0-all").build()) + .waitForActiveShards(ActiveShardCount.NONE); + state = cluster.createIndex(state, request); + assertTrue(state.metaData().hasIndex("index")); + while (state.routingTable().index("index").shard(0).allShardsStarted() == false) { + logger.info(state); + state = cluster.applyStartedShards(state, + state.routingTable().index("index").shard(0).shardsWithState(ShardRoutingState.INITIALIZING)); + state = cluster.reroute(state, new ClusterRerouteRequest()); + } + + IndexShardRoutingTable preTable = state.routingTable().index("index").shard(0); + final Set<String> unchangedNodeIds; + final IndexShardRoutingTable postTable; + + if (randomBoolean()) { + // simulate node removal + List<DiscoveryNode> nodesToRemove = randomSubsetOf(2, dataNodes); + unchangedNodeIds = dataNodes.stream().filter(n -> nodesToRemove.contains(n) == false) + .map(DiscoveryNode::getId).collect(Collectors.toSet()); + + state = cluster.removeNodes(state, nodesToRemove); + postTable = state.routingTable().index("index").shard(0); + + assertTrue("not all shards started in " + state.toString(), postTable.allShardsStarted()); + assertThat(postTable.toString(), postTable.getAllAllocationIds(), everyItem(isIn(preTable.getAllAllocationIds()))); + } else { + // fake an election where conflicting nodes are removed and re-added + state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).masterNodeId(null).build()).build(); + + List<DiscoveryNode> conflictingNodes = randomSubsetOf(2, dataNodes); + unchangedNodeIds = dataNodes.stream().filter(n -> conflictingNodes.contains(n) == false) + .map(DiscoveryNode::getId).collect(Collectors.toSet()); + + List<DiscoveryNode> nodesToAdd = conflictingNodes.stream() + .map(n -> new DiscoveryNode(n.getName(), n.getId(),
buildNewFakeTransportAddress(), n.getAttributes(), n.getRoles(), n.getVersion())) + .collect(Collectors.toList()); + + if (randomBoolean()) { + nodesToAdd.add(createNode(DiscoveryNode.Role.DATA)); + } + + state = cluster.joinNodesAndBecomeMaster(state, nodesToAdd); + postTable = state.routingTable().index("index").shard(0); + } + + Set unchangedAllocationIds = preTable.getShards().stream().filter(shr -> unchangedNodeIds.contains(shr.currentNodeId())) + .map(shr -> shr.allocationId().getId()).collect(Collectors.toSet()); + + assertThat(postTable.toString(), unchangedAllocationIds, everyItem(isIn(postTable.getAllAllocationIds()))); + + postTable.getShards().forEach( + shardRouting -> { + if (shardRouting.assignedToNode() && unchangedAllocationIds.contains(shardRouting.allocationId().getId())) { + assertTrue("Shard should be active: " + shardRouting, shardRouting.active()); + } + } + ); + } finally { + terminate(threadPool); + } + } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java index ad36457bde505..de8251ece255f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java @@ -185,7 +185,7 @@ public void testRequestDataHavePriorityOverTemplateData() throws Exception { public void testDefaultSettings() throws Exception { final ClusterState result = executeTask(); - assertThat(result.getMetaData().index("test").getSettings().get(SETTING_NUMBER_OF_SHARDS), equalTo("5")); + assertThat(result.getMetaData().index("test").getSettings().get(SETTING_NUMBER_OF_SHARDS), equalTo("1")); } public void testSettingsFromClusterState() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java index d5f3d71d7ee26..24f5a69656114 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java @@ -56,6 +56,7 @@ import java.util.stream.Stream; import static java.util.Collections.emptyMap; +import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED; import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; @@ -92,6 +93,21 @@ public static boolean isSplitable(int source, int target) { return source * x == target; } + public void testNumberOfShards() { + { + final Version versionCreated = VersionUtils.randomVersionBetween( + random(), + Version.V_6_0_0_alpha1, VersionUtils.getPreviousVersion(Version.V_7_0_0_alpha1)); + final Settings.Builder indexSettingsBuilder = Settings.builder().put(SETTING_VERSION_CREATED, versionCreated); + assertThat(MetaDataCreateIndexService.IndexCreationTask.getNumberOfShards(indexSettingsBuilder), equalTo(5)); + } + { + final Version versionCreated = VersionUtils.randomVersionBetween(random(), Version.V_7_0_0_alpha1, Version.CURRENT); + final Settings.Builder indexSettingsBuilder = Settings.builder().put(SETTING_VERSION_CREATED, versionCreated); + assertThat(MetaDataCreateIndexService.IndexCreationTask.getNumberOfShards(indexSettingsBuilder), equalTo(1)); + } + } + public void testValidateShrinkIndex() { int numShards = randomIntBetween(2, 42); ClusterState state = 
createClusterState("source", numShards, randomIntBetween(0, 10), diff --git a/server/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java b/server/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java index e22836087367c..849841943ecc6 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java @@ -48,11 +48,13 @@ import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.Environment; import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; import org.junit.After; import org.junit.Before; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.instanceOf; public class KeyStoreWrapperTests extends ESTestCase { @@ -97,6 +99,19 @@ public void testCreate() throws Exception { assertTrue(keystore.getSettingNames().contains(KeyStoreWrapper.SEED_SETTING.getKey())); } + public void testCannotReadStringFromClosedKeystore() throws Exception { + KeyStoreWrapper keystore = KeyStoreWrapper.create(); + assertThat(keystore.getSettingNames(), Matchers.hasItem(KeyStoreWrapper.SEED_SETTING.getKey())); + assertThat(keystore.getString(KeyStoreWrapper.SEED_SETTING.getKey()), notNullValue()); + + keystore.close(); + + assertThat(keystore.getSettingNames(), Matchers.hasItem(KeyStoreWrapper.SEED_SETTING.getKey())); + final IllegalStateException exception = expectThrows(IllegalStateException.class, + () -> keystore.getString(KeyStoreWrapper.SEED_SETTING.getKey())); + assertThat(exception.getMessage(), containsString("closed")); + } + public void testUpgradeNoop() throws Exception { KeyStoreWrapper keystore = KeyStoreWrapper.create(); SecureString seed = keystore.getString(KeyStoreWrapper.SEED_SETTING.getKey()); diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index 9e8638af2491e..8bfd08244e466 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -87,6 +87,7 @@ import org.elasticsearch.transport.TransportService; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; @@ -232,6 +233,15 @@ public ClusterState addNodes(ClusterState clusterState, List node return runTasks(joinTaskExecutor, clusterState, nodes); } + public ClusterState joinNodesAndBecomeMaster(ClusterState clusterState, List nodes) { + List joinNodes = new ArrayList<>(); + joinNodes.add(NodeJoinController.BECOME_MASTER_TASK); + joinNodes.add(NodeJoinController.FINISH_ELECTION_TASK); + joinNodes.addAll(nodes); + + return runTasks(joinTaskExecutor, clusterState, joinNodes); + } + public ClusterState removeNodes(ClusterState clusterState, List nodes) { return runTasks(nodeRemovalExecutor, clusterState, nodes.stream() .map(n -> new ZenDiscovery.NodeRemovalClusterStateTaskExecutor.Task(n, "dummy reason")).collect(Collectors.toList())); diff --git a/server/src/test/java/org/elasticsearch/script/ScriptMetaDataTests.java b/server/src/test/java/org/elasticsearch/script/ScriptMetaDataTests.java index d5769cd192b75..32d4d48a44810 100644 --- a/server/src/test/java/org/elasticsearch/script/ScriptMetaDataTests.java 
+++ b/server/src/test/java/org/elasticsearch/script/ScriptMetaDataTests.java @@ -22,6 +22,8 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -130,6 +132,45 @@ public void testBuilder() { assertEquals("1 + 1", result.getStoredScript("_id").getSource()); } + public void testLoadEmptyScripts() throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject().field("mustache#empty", "").endObject(); + XContentParser parser = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + BytesReference.bytes(builder).streamInput()); + ScriptMetaData.fromXContent(parser); + assertWarnings("empty templates should no longer be used"); + + builder = XContentFactory.jsonBuilder(); + builder.startObject().field("lang#empty", "").endObject(); + parser = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + BytesReference.bytes(builder).streamInput()); + ScriptMetaData.fromXContent(parser); + assertWarnings("empty scripts should no longer be used"); + + builder = XContentFactory.jsonBuilder(); + builder.startObject().startObject("script").field("lang", "lang").field("source", "").endObject().endObject(); + parser = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + BytesReference.bytes(builder).streamInput()); + ScriptMetaData.fromXContent(parser); + assertWarnings("empty scripts should no longer be used"); + + builder = XContentFactory.jsonBuilder(); + builder.startObject().startObject("script").field("lang", "mustache").field("source", "").endObject().endObject(); + parser = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + BytesReference.bytes(builder).streamInput()); + ScriptMetaData.fromXContent(parser); + assertWarnings("empty templates should no longer be used"); + } + + @Override + protected boolean enableWarningsCheck() { + return true; + } + private ScriptMetaData randomScriptMetaData(XContentType sourceContentType, int minNumberScripts) throws IOException { ScriptMetaData.Builder builder = new ScriptMetaData.Builder(null); int numScripts = scaledRandomIntBetween(minNumberScripts, 32); diff --git a/server/src/test/java/org/elasticsearch/script/StoredScriptSourceTests.java b/server/src/test/java/org/elasticsearch/script/StoredScriptSourceTests.java index 168ec4fc553b9..8aa4ca57acfed 100644 --- a/server/src/test/java/org/elasticsearch/script/StoredScriptSourceTests.java +++ b/server/src/test/java/org/elasticsearch/script/StoredScriptSourceTests.java @@ -58,7 +58,7 @@ protected StoredScriptSource createTestInstance() { @Override protected StoredScriptSource doParseInstance(XContentParser parser) { - return StoredScriptSource.fromXContent(parser); + return StoredScriptSource.fromXContent(parser, false); } @Override diff --git a/server/src/test/java/org/elasticsearch/script/StoredScriptTests.java b/server/src/test/java/org/elasticsearch/script/StoredScriptTests.java index 
2bf0216c546ec..79e3195f3d923 100644 --- a/server/src/test/java/org/elasticsearch/script/StoredScriptTests.java +++ b/server/src/test/java/org/elasticsearch/script/StoredScriptTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.AbstractSerializingTestCase; +import java.io.IOException; import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -204,6 +205,39 @@ public void testSourceParsingErrors() throws Exception { } } + public void testEmptyTemplateDeprecations() throws IOException { + try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { + builder.startObject().endObject(); + + StoredScriptSource parsed = StoredScriptSource.parse(BytesReference.bytes(builder), XContentType.JSON); + StoredScriptSource source = new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, "", Collections.emptyMap()); + + assertThat(parsed, equalTo(source)); + assertWarnings("empty templates should no longer be used"); + } + + try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { + builder.startObject().field("template", "").endObject(); + + StoredScriptSource parsed = StoredScriptSource.parse(BytesReference.bytes(builder), XContentType.JSON); + StoredScriptSource source = new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, "", Collections.emptyMap()); + + assertThat(parsed, equalTo(source)); + assertWarnings("empty templates should no longer be used"); + } + + try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { + builder.startObject().field("script").startObject().field("lang", "mustache") + .field("source", "").endObject().endObject(); + + StoredScriptSource parsed = StoredScriptSource.parse(BytesReference.bytes(builder), XContentType.JSON); + StoredScriptSource source = new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, "", Collections.emptyMap()); + + assertThat(parsed, equalTo(source)); + assertWarnings("empty templates should no longer be used"); + } + } + @Override protected StoredScriptSource createTestInstance() { return new StoredScriptSource( @@ -219,7 +253,7 @@ protected Writeable.Reader instanceReader() { @Override protected StoredScriptSource doParseInstance(XContentParser parser) { - return StoredScriptSource.fromXContent(parser); + return StoredScriptSource.fromXContent(parser, false); } @Override diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java index 43c7010d4b023..73a3c553b4d1a 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/moving/avg/MovAvgIT.java @@ -19,9 +19,7 @@ package org.elasticsearch.search.aggregations.pipeline.moving.avg; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.action.bulk.BulkRequestBuilder; -import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; @@ -45,7 +43,6 @@ import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.Matchers; -import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -69,7 +66,6 @@ import static org.hamcrest.core.IsNull.nullValue; 
@ESIntegTestCase.SuiteScopeTestCase -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29456") public class MovAvgIT extends ESIntegTestCase { private static final String INTERVAL_FIELD = "l_value"; private static final String VALUE_FIELD = "v_value"; @@ -1308,7 +1304,7 @@ private void assertBucketContents(Histogram.Bucket actual, Double expectedCount, } else { assertThat("[value] movavg is null", valuesMovAvg, notNullValue()); assertEquals("[value] movavg does not match expected [" + valuesMovAvg.value() + " vs " + expectedValue + "]", - valuesMovAvg.value(), expectedValue, 0.1 * Math.abs(countMovAvg.value())); + valuesMovAvg.value(), expectedValue, 0.1 * Math.abs(valuesMovAvg.value())); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index 950bb14eed9af..ab99bc0d97ba4 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -21,7 +21,10 @@ import com.carrotsearch.randomizedtesting.RandomizedTest; import org.apache.http.HttpHost; +import org.apache.http.entity.StringEntity; +import org.apache.http.message.BasicHeader; import org.elasticsearch.Version; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; @@ -29,6 +32,7 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestApi; import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec; @@ -38,6 +42,7 @@ import org.elasticsearch.test.rest.yaml.section.ExecutableSection; import org.junit.AfterClass; import org.junit.Before; +import org.junit.BeforeClass; import java.io.IOException; import java.nio.file.Files; @@ -94,6 +99,13 @@ protected ESClientYamlSuiteTestCase(ClientYamlTestCandidate testCandidate) { this.testCandidate = testCandidate; } + private static boolean useDefaultNumberOfShards; + + @BeforeClass + public static void initializeUseDefaultNumberOfShards() { + useDefaultNumberOfShards = usually(); + } + @Before public void initAndResetContext() throws Exception { if (restTestExecutionContext == null) { @@ -318,6 +330,14 @@ public void test() throws IOException { throw new IllegalArgumentException("No executable sections loaded for [" + testCandidate.getTestPath() + "]"); } + if (useDefaultNumberOfShards == false + && testCandidate.getTestSection().getSkipSection().getFeatures().contains("default_shards") == false) { + final Request request = new Request("PUT", "/_template/global"); + request.setHeaders(new BasicHeader("Content-Type", XContentType.JSON.mediaTypeWithoutParameters())); + request.setEntity(new StringEntity("{\"index_patterns\":[\"*\"],\"settings\":{\"index.number_of_shards\":2}}")); + adminClient().performRequest(request); + } + if (!testCandidate.getSetupSection().isEmpty()) { logger.debug("start setup test [{}]", testCandidate.getTestPath()); for (DoSection doSection : testCandidate.getSetupSection().getDoSections()) { diff --git 
a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java index ab9be65514a96..d074dd82af7a6 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java @@ -37,6 +37,7 @@ public final class Features { private static final List SUPPORTED = unmodifiableList(Arrays.asList( "catch_unauthorized", + "default_shards", "embedded_stash_key", "headers", "stash_in_key", diff --git a/x-pack/docs/en/commands/syskeygen.asciidoc b/x-pack/docs/en/commands/syskeygen.asciidoc index 8683d801d58f1..f4a198ff4bf22 100644 --- a/x-pack/docs/en/commands/syskeygen.asciidoc +++ b/x-pack/docs/en/commands/syskeygen.asciidoc @@ -43,7 +43,7 @@ environment variable. === Examples The following command generates a `system_key` file in the -default `$ES_HOME/config/x-pack` directory: +default `$ES_HOME/config` directory: [source, sh] -------------------------------------------------- diff --git a/x-pack/docs/en/security/auditing.asciidoc b/x-pack/docs/en/security/auditing.asciidoc index 8bff8727f8358..ee508a5ac8d2d 100644 --- a/x-pack/docs/en/security/auditing.asciidoc +++ b/x-pack/docs/en/security/auditing.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[auditing]] -== Auditing Security Events +== Auditing security events You can enable auditing to keep track of security-related events such as authentication failures and refused connections. Logging these events enables you @@ -40,7 +41,7 @@ events are pushed to the index by setting [float] [[audit-event-types]] -=== Audit Event Types +=== Audit event types Each request may generate multiple audit events. The following is a list of the events that can be generated: @@ -81,11 +82,11 @@ The following is a list of the events that can be generated: [float] [[audit-event-attributes]] -=== Audit Event Attributes +=== Audit event attributes The following table shows the common attributes that can be associated with every event. -.Common Attributes +.Common attributes [cols="2,7",options="header"] |====== | Attribute | Description @@ -103,7 +104,7 @@ The following table shows the common attributes that can be associated with ever The following tables show the attributes that can be associated with each type of event. The log level determines which attributes are included in a log entry. -.REST anonymous_access_denied Attributes +.REST anonymous_access_denied attributes [cols="2,7",options="header"] |====== | Attribute | Description @@ -112,7 +113,7 @@ The log level determines which attributes are included in a log entry. | `request_body` | The body of the request, if enabled. |====== -.REST authentication_success Attributes +.REST authentication_success attributes [cols="2,7",options="header"] |====== | Attribute | Description @@ -123,7 +124,7 @@ The log level determines which attributes are included in a log entry. | `request_body` | The body of the request, if enabled. |====== -.REST authentication_failed Attributes +.REST authentication_failed attributes [cols="2,7",options="header"] |====== | Attribute | Description @@ -133,7 +134,7 @@ The log level determines which attributes are included in a log entry. | `request_body` | The body of the request, if enabled. 
|====== -.REST realm_authentication_failed Attributes +.REST realm_authentication_failed attributes [cols="2,7",options="header"] |====== | Attribute | Description @@ -146,7 +147,7 @@ The log level determines which attributes are included in a log entry. consulted realm. |====== -.Transport anonymous_access_denied Attributes +.Transport anonymous_access_denied attributes [cols="2,7",options="header"] |====== | Attribute | Description @@ -161,7 +162,7 @@ The log level determines which attributes are included in a log entry. pertains to (when applicable). |====== -.Transport authentication_success Attributes +.Transport authentication_success attributes [cols="2,7",options="header"] |====== | Attribute | Description @@ -176,7 +177,7 @@ The log level determines which attributes are included in a log entry. | `request` | The type of request that was executed. |====== -.Transport authentication_failed Attributes +.Transport authentication_failed attributes [cols="2,7",options="header"] |====== | Attribute | Description @@ -192,7 +193,7 @@ The log level determines which attributes are included in a log entry. pertains to (when applicable). |====== -.Transport realm_authentication_failed Attributes +.Transport realm_authentication_failed attributes [cols="2,7",options="header"] |====== | Attribute | Description @@ -211,7 +212,7 @@ The log level determines which attributes are included in a log entry. consulted realm. |====== -.Transport access_granted Attributes +.Transport access_granted attributes [cols="2,7",options="header"] |====== | Attribute | Description @@ -228,7 +229,7 @@ The log level determines which attributes are included in a log entry. pertains to (when applicable). |====== -.Transport access_denied Attributes +.Transport access_denied attributes [cols="2,7",options="header"] |====== | Attribute | Description @@ -245,7 +246,7 @@ The log level determines which attributes are included in a log entry. relates to (when applicable). |====== -.Transport tampered_request Attributes +.Transport tampered_request attributes [cols="2,7",options="header"] |====== | Attribute | Description @@ -261,7 +262,7 @@ The log level determines which attributes are included in a log entry. pertains to (when applicable). |====== -.IP Filter connection_granted Attributes +.IP filter connection_granted attributes [cols="2,7",options="header"] |====== | Attribute | Description @@ -271,7 +272,7 @@ The log level determines which attributes are included in a log entry. the request. |====== -.IP Filter connection_denied Attributes +.IP filter connection_denied attributes [cols="2,7",options="header"] |====== | Attribute | Description @@ -283,14 +284,14 @@ The log level determines which attributes are included in a log entry. [float] [[audit-log-output]] -=== Logfile Audit Output +=== Logfile audit output The `logfile` audit output is the default output for auditing. It writes data to the `_access.log` file in the logs directory. [float] [[audit-log-entry-format]] -=== Log Entry Format +=== Log entry format The format of a log entry is: @@ -318,7 +319,7 @@ The format of a log entry is: [float] [[audit-log-settings]] -=== Logfile Output Settings +=== Logfile output settings The events and some other information about what gets logged can be controlled using settings in the `elasticsearch.yml` file. See @@ -330,13 +331,13 @@ audited in plain text when including the request body in audit events. 
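For example, a minimal `elasticsearch.yml` sketch of such settings (the event names shown are illustrative; the full list of `xpack.security.audit.logfile.events.*` settings is in the settings reference):

[source,yaml]
----------------------------
xpack.security.audit.enabled: true
xpack.security.audit.logfile.events.include: [ access_denied, authentication_failed ]
xpack.security.audit.logfile.events.emit_request_body: true
----------------------------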
[[logging-file]] You can also configure how the logfile is written in the `log4j2.properties` -file located in `CONFIG_DIR/x-pack`. By default, audit information is appended to the +file located in `CONFIG_DIR`. By default, audit information is appended to the `_access.log` file located in the standard Elasticsearch `logs` directory (typically located at `$ES_HOME/logs`). The file rolls over on a daily basis. [float] [[audit-log-ignore-policy]] -=== Logfile Audit Events Ignore Policies +=== Logfile audit events ignore policies The comprehensive audit trail is necessary to ensure accountability. It offers tremendous value during incident response and can even be required for demonstrating compliance. @@ -414,7 +415,7 @@ xpack.security.audit.logfile.events.ignore_filters: [float] [[audit-index]] -=== Index Audit Output +=== Index audit output In addition to logging to a file, you can store audit logs in Elasticsearch rolling indices. These indices can be either on the same cluster, or on a @@ -429,13 +430,13 @@ xpack.security.audit.outputs: [ index, logfile ] ---------------------------- For more configuration options, see -{ref}/auditing-settings.html#index-audit-settings[Audit Log Indexing Configuration Settings]. +{ref}/auditing-settings.html#index-audit-settings[Audit log indexing configuration settings]. IMPORTANT: No filtering is performed when auditing, so sensitive data may be audited in plain text when including the request body in audit events. [float] -==== Audit Index Settings +==== Audit index settings You can also configure settings for the indices that the events are stored in. These settings are configured in the `xpack.security.audit.index.settings` namespace @@ -451,7 +452,7 @@ xpack.security.audit.index.settings: ---------------------------- [float] -==== Forwarding Audit Logs to a Remote Cluster +==== Forwarding audit logs to a remote cluster To index audit events to a remote Elasticsearch cluster, you configure the following `xpack.security.audit.index.client` settings: diff --git a/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc b/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc index 2069176172e31..c0461f4f33885 100644 --- a/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/active-directory-realm.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[active-directory-realm]] === Active Directory user authentication diff --git a/x-pack/docs/en/security/authentication/anonymous-access.asciidoc b/x-pack/docs/en/security/authentication/anonymous-access.asciidoc index c95328e99a3eb..983348f8cf584 100644 --- a/x-pack/docs/en/security/authentication/anonymous-access.asciidoc +++ b/x-pack/docs/en/security/authentication/anonymous-access.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[anonymous-access]] -=== Enabling Anonymous Access +=== Enabling anonymous access Incoming requests are considered to be _anonymous_ if no authentication token can be extracted from the incoming request. By default, anonymous requests are rejected and an authentication error is returned (status code `401`). 
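Anonymous access is enabled by assigning one or more roles to anonymous users in `elasticsearch.yml`; a minimal sketch (the username and role below are placeholders):

[source,yaml]
----------------------------
xpack.security.authc.anonymous.username: anonymous_user
xpack.security.authc.anonymous.roles: monitoring_viewer
xpack.security.authc.anonymous.authz_exception: true
----------------------------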
diff --git a/x-pack/docs/en/security/authentication/built-in-users.asciidoc b/x-pack/docs/en/security/authentication/built-in-users.asciidoc index 2400643755abd..74fc9f1e1db12 100644 --- a/x-pack/docs/en/security/authentication/built-in-users.asciidoc +++ b/x-pack/docs/en/security/authentication/built-in-users.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[built-in-users]] === Built-in users diff --git a/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc b/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc index b43a0911e0467..6ea9b243aad4d 100644 --- a/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/configuring-ldap-realm.asciidoc @@ -56,7 +56,7 @@ xpack: group_search: base_dn: "dc=example,dc=com" files: - role_mapping: "CONFIG_DIR/x-pack/role_mapping.yml" + role_mapping: "CONFIG_DIR/role_mapping.yml" unmapped_groups_as_roles: false ------------------------------------------------------------ diff --git a/x-pack/docs/en/security/authentication/custom-realm.asciidoc b/x-pack/docs/en/security/authentication/custom-realm.asciidoc index a7df6f5ff865b..8e0114b7454c6 100644 --- a/x-pack/docs/en/security/authentication/custom-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/custom-realm.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[custom-realms]] -=== Integrating with Other Authentication Systems +=== Integrating with other authentication systems If you are using an authentication system that is not supported out-of-the-box by {security}, you can create a custom realm to interact with it to authenticate @@ -7,7 +8,7 @@ users. You implement a custom realm as an SPI loaded security extension as part of an ordinary elasticsearch plugin. [[implementing-custom-realm]] -==== Implementing a Custom Realm +==== Implementing a custom realm Sample code that illustrates the structure and implementation of a custom realm is provided in the https://github.com/elastic/shield-custom-realm-example[custom-realm-example] @@ -70,7 +71,7 @@ part of the `SecurityExtension` interface, it's available as part of the elastic . Bundle all in a single zip file. 
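Once the zip is installed as a plugin, the realm is enabled with ordinary realm settings in `elasticsearch.yml`; a minimal sketch, assuming the extension registers a realm type named `custom` (both the type identifier and the realm name `custom1` are placeholders for whatever the extension actually registers):

[source,yaml]
----------------------------
xpack.security.authc.realms.custom1.type: custom
xpack.security.authc.realms.custom1.order: 0
----------------------------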
[[using-custom-realm]] -==== Using a Custom Realm to Authenticate Users +==== Using a custom realm to authenticate users To use a custom realm: diff --git a/x-pack/docs/en/security/authentication/file-realm.asciidoc b/x-pack/docs/en/security/authentication/file-realm.asciidoc index cf6f5cacd1c17..1161778bb801c 100644 --- a/x-pack/docs/en/security/authentication/file-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/file-realm.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[file-realm]] === File-based user authentication diff --git a/x-pack/docs/en/security/authentication/internal-users.asciidoc b/x-pack/docs/en/security/authentication/internal-users.asciidoc index 53468363dc8d0..77571a53a56f3 100644 --- a/x-pack/docs/en/security/authentication/internal-users.asciidoc +++ b/x-pack/docs/en/security/authentication/internal-users.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[internal-users]] === Internal users diff --git a/x-pack/docs/en/security/authentication/ldap-realm.asciidoc b/x-pack/docs/en/security/authentication/ldap-realm.asciidoc index 205c18429bc98..02d0162a9c9f9 100644 --- a/x-pack/docs/en/security/authentication/ldap-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/ldap-realm.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[ldap-realm]] === LDAP user authentication diff --git a/x-pack/docs/en/security/authentication/native-realm.asciidoc b/x-pack/docs/en/security/authentication/native-realm.asciidoc index 3643e42e02a1c..f7b514b81449f 100644 --- a/x-pack/docs/en/security/authentication/native-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/native-realm.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[native-realm]] === Native user authentication diff --git a/x-pack/docs/en/security/authentication/overview.asciidoc b/x-pack/docs/en/security/authentication/overview.asciidoc index ada5453c7a765..da5f6a4ea3cea 100644 --- a/x-pack/docs/en/security/authentication/overview.asciidoc +++ b/x-pack/docs/en/security/authentication/overview.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[setting-up-authentication]] == User authentication diff --git a/x-pack/docs/en/security/authentication/pki-realm.asciidoc b/x-pack/docs/en/security/authentication/pki-realm.asciidoc index 4fc91717f9342..6ce9b0e0770a4 100644 --- a/x-pack/docs/en/security/authentication/pki-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/pki-realm.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[pki-realm]] === PKI user authentication diff --git a/x-pack/docs/en/security/authentication/realms.asciidoc b/x-pack/docs/en/security/authentication/realms.asciidoc index 7bd48c5c8f017..ec0945b5a113c 100644 --- a/x-pack/docs/en/security/authentication/realms.asciidoc +++ b/x-pack/docs/en/security/authentication/realms.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[realms]] === Realms diff --git a/x-pack/docs/en/security/authentication/saml-guide.asciidoc b/x-pack/docs/en/security/authentication/saml-guide.asciidoc index d1f7961fecbf3..740f51c877ded 100644 --- a/x-pack/docs/en/security/authentication/saml-guide.asciidoc +++ b/x-pack/docs/en/security/authentication/saml-guide.asciidoc @@ -1,6 +1,7 @@ +[role="xpack"] [[saml-guide]] -== Configuring SAML single-sign-on on the Elastic Stack +== Configuring SAML single-sign-on on the {stack} The Elastic Stack supports SAML single-sign-on (SSO) into {kib}, using {es} as a backend service. 
In SAML terminology, the Elastic Stack is operating as a diff --git a/x-pack/docs/en/security/authentication/saml-realm.asciidoc b/x-pack/docs/en/security/authentication/saml-realm.asciidoc index c05f82d341b03..a55ae270a19a1 100644 --- a/x-pack/docs/en/security/authentication/saml-realm.asciidoc +++ b/x-pack/docs/en/security/authentication/saml-realm.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[saml-realm]] === SAML authentication {security} supports user authentication using SAML Single Sign On. diff --git a/x-pack/docs/en/security/authentication/user-cache.asciidoc b/x-pack/docs/en/security/authentication/user-cache.asciidoc index ba2b363a843ed..36af070bf067b 100644 --- a/x-pack/docs/en/security/authentication/user-cache.asciidoc +++ b/x-pack/docs/en/security/authentication/user-cache.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[controlling-user-cache]] -=== Controlling the User Cache +=== Controlling the user cache User credentials are cached in memory on each node to avoid connecting to a remote authentication service or hitting the disk for every incoming request. @@ -34,7 +35,7 @@ setting the `cache_hash_algo` setting to any of the following: |======================= [[cache-eviction-api]] -==== Evicting Users from the Cache +==== Evicting users from the cache {security} exposes a {ref}/security-api-clear-cache.html[Clear Cache API] you can use diff --git a/x-pack/docs/en/security/authorization/alias-privileges.asciidoc b/x-pack/docs/en/security/authorization/alias-privileges.asciidoc index 6916e2ab2ca30..05c9359df5aeb 100644 --- a/x-pack/docs/en/security/authorization/alias-privileges.asciidoc +++ b/x-pack/docs/en/security/authorization/alias-privileges.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[securing-aliases]] -=== Granting Privileges for Indices & Aliases +=== Granting privileges for indices and aliases Elasticsearch allows you to execute operations against {ref}/indices-aliases.html[index aliases], which are effectively virtual indices. An alias points to one or more indices, diff --git a/x-pack/docs/en/security/authorization/custom-roles-provider.asciidoc b/x-pack/docs/en/security/authorization/custom-roles-provider.asciidoc index 9056467ced9f9..c218fa04f8ec7 100644 --- a/x-pack/docs/en/security/authorization/custom-roles-provider.asciidoc +++ b/x-pack/docs/en/security/authorization/custom-roles-provider.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[custom-roles-provider]] -=== Custom Roles Provider Extension +=== Custom roles provider extension If you need to retrieve user roles from a system not supported out-of-the-box by {security}, you can create a custom roles provider to retrieve and resolve @@ -7,7 +8,7 @@ roles. You implement a custom roles provider as an SPI-loaded security extension as part of an ordinary Elasticsearch plugin. [[implementing-custom-roles-provider]] -==== Implementing a Custom Roles Provider +==== Implementing a custom roles provider To create a custom roles provider: @@ -62,7 +63,7 @@ part of the `SecurityExtension` interface, it's available as part of the elastic . Bundle all in a single zip file.
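Whatever the concrete interface looks like, a roles provider reduces to: take a set of role names, resolve what an external system knows about them, and hand the results to a callback. A minimal sketch under that assumption, with an invented in-memory stand-in for the external system (the real `SecurityExtension` method signature differs):

[source,java]
----
import java.util.Map;
import java.util.Set;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.stream.Collectors;

/**
 * Illustrative roles provider: resolve role definitions from an
 * "external" source and pass them to a listener. The types here are
 * deliberately generic, not the real extension contract.
 */
public class InMemoryRolesProvider implements BiConsumer<Set<String>, Consumer<Set<String>>> {

    // Pretend this map is a remote system holding role -> privileges.
    private static final Map<String, String> EXTERNAL_ROLES = Map.of(
            "auditor", "read:logs-*",
            "operator", "all:*");

    @Override
    public void accept(Set<String> roleNames, Consumer<Set<String>> listener) {
        // Resolve only the requested names; unknown names are skipped so
        // another provider (or the native store) can supply them instead.
        Set<String> resolved = roleNames.stream()
                .filter(EXTERNAL_ROLES::containsKey)
                .map(name -> name + " -> " + EXTERNAL_ROLES.get(name))
                .collect(Collectors.toSet());
        listener.accept(resolved);
    }

    public static void main(String[] args) {
        // Prints only the auditor mapping; "missing" is left for other providers.
        new InMemoryRolesProvider().accept(Set.of("auditor", "missing"), System.out::println);
    }
}
----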
[[using-custom-roles-provider]] -==== Using a Custom Roles Provider to Resolve Roles +==== Using a custom roles provider to resolve roles To use a custom roles provider: diff --git a/x-pack/docs/en/security/authorization/field-and-document-access-control.asciidoc b/x-pack/docs/en/security/authorization/field-and-document-access-control.asciidoc index 88d0e157ca052..a1aa44895c6a6 100644 --- a/x-pack/docs/en/security/authorization/field-and-document-access-control.asciidoc +++ b/x-pack/docs/en/security/authorization/field-and-document-access-control.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[field-and-document-access-control]] -=== Setting Up Field and Document Level Security +=== Setting up field and document level security You can control access to data within an index by adding field and document level security permissions to a role. Field level security permissions restrict access @@ -23,7 +24,7 @@ document level permissions per index. See <>. ===================================================================== [[field-level-security]] -==== Field Level Security +==== Field level security To enable field level security, specify the fields that each role can access as part of the indices permissions in a role definition. Field level security is @@ -235,7 +236,7 @@ The resulting permission is equal to: [[document-level-security]] -==== Document Level Security +==== Document level security Document level security restricts the documents that users have read access to. To enable document level security, specify a query that matches all the @@ -292,7 +293,7 @@ For example, the following role grants read access only to the documents whose NOTE: `query` also accepts queries written as string values. [[templating-role-query]] -===== Templating a Role Query +===== Templating a role query You can use Mustache templates in a role query to insert the username of the current authenticated user into the role. Like other places in {es} that support @@ -358,7 +359,7 @@ based on the `group.id` field in your documents: -------------------------------------------------- [[set-security-user-processor]] -===== Set Security User Ingest Processor +===== Set security user ingest processor If an index is shared by many small users it makes sense to put all these users into the same index. Having a dedicated index or shard per user is wasteful. @@ -416,7 +417,7 @@ to the `user` field for all documents that are processed by this pipeline: -------------------------------------------------- [[multiple-roles-dls-fls]] -==== Multiple Roles with Document and Field Level Security +==== Multiple roles with document and field level security A user can have many roles and each role can define different permissions on the same index. It is important to understand the behavior of document and field diff --git a/x-pack/docs/en/security/authorization/mapping-roles.asciidoc b/x-pack/docs/en/security/authorization/mapping-roles.asciidoc index 590546e217c86..cf8373a65f335 100644 --- a/x-pack/docs/en/security/authorization/mapping-roles.asciidoc +++ b/x-pack/docs/en/security/authorization/mapping-roles.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[mapping-roles]] -=== Mapping Users and Groups to Roles +=== Mapping users and groups to roles If you authenticate users with the `native` or `file` realms, you can manage role assignment by using the <> or @@ -24,29 +25,26 @@ you are able to map users to both API-managed roles and file-managed roles (and likewise for file-based role-mappings). 
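Because the mapping file keys roles by name with user and group DNs as values, resolving a user's roles is a reverse lookup, and the many-to-many shape falls out naturally. A simplified sketch of that resolution step (real DN matching is more lenient than the case-insensitive string comparison used here):

[source,java]
----
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

/** Sketch of resolving roles from a role_mapping.yml-style structure. */
public class RoleMappingResolver {

    // role -> DNs, mirroring the YAML file: roles are keys, users/groups are values.
    private static final Map<String, List<String>> MAPPINGS = Map.of(
            "monitoring", List.of("cn=admins,dc=example,dc=com", "cn=ops,dc=example,dc=com"),
            "user", List.of("cn=users,dc=example,dc=com", "cn=admins,dc=example,dc=com"));

    /** Collect every role whose mapping matches the user DN or any group DN. */
    static Set<String> resolveRoles(String userDn, Set<String> groupDns) {
        Set<String> roles = new HashSet<>();
        MAPPINGS.forEach((role, dns) -> {
            for (String dn : dns) {
                if (dn.equalsIgnoreCase(userDn) || groupDns.stream().anyMatch(dn::equalsIgnoreCase)) {
                    roles.add(role);
                }
            }
        });
        return roles;
    }

    public static void main(String[] args) {
        // A member of cn=admins picks up both roles: the mapping is many-to-many.
        System.out.println(resolveRoles("cn=jdoe,dc=example,dc=com",
                Set.of("cn=admins,dc=example,dc=com")));
    }
}
----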
[[mapping-roles-api]] -==== Using the Role Mapping API +==== Using the role mapping API You can define role-mappings through the {ref}/security-api-role-mapping.html[role mapping API]. [[mapping-roles-file]] -==== Using Role Mapping Files +==== Using role mapping files To use file-based role-mappings, you must configure the mappings in a YAML file and copy it to each node in the cluster. Tools like Puppet or Chef can help with this. -By default, role mappings are stored in `ES_PATH_CONF/x-pack/role_mapping.yml`, +By default, role mappings are stored in `ES_PATH_CONF/role_mapping.yml`, where `ES_PATH_CONF` is `ES_HOME/config` (zip/tar installations) or `/etc/elasticsearch` (package installations). To specify a different location, -you configure the `files.role_mapping` realm settings in `elasticsearch.yml`. -This setting enables you to use a different set of mappings for each realm type: - -|===== -| `xpack.security.authc.ldap.files.role_mapping` | | | The location of the role mappings for LDAP realms. -| `xpack.security.authc.active_directory.files.role_mapping` | | | The location of the role mappings for Active Directory realms. -| `xpack.security.authc.pki.files.role_mapping` | | | The location of the role mappings for PKI realms. -|===== +you configure the `files.role_mapping` setting in the +{ref}/security-settings.html#ref-ad-settings[Active Directory], +{ref}/security-settings.html#ref-ldap-settings[LDAP], and +{ref}/security-settings.html#ref-pki-settings[PKI] realm settings in +`elasticsearch.yml`. Within the role mapping file, the security roles are keys, and groups and users are values. The mappings can have a many-to-many relationship. When you map roles @@ -59,10 +57,10 @@ You can change this default behavior by changing the this is a common setting in Elasticsearch, changing its value might affect other schedules in the system. -==== Realm Specific Details +==== Realm specific details [float] [[ldap-role-mapping]] -===== Active Directory and LDAP Realms +===== Active Directory and LDAP realms To specify users and groups in the role mappings, you use their _Distinguished Names_ (DNs). A DN is a string that uniquely identifies the user @@ -116,7 +114,7 @@ PUT _xpack/security/role_mapping/basic_users [float] [[pki-role-mapping]] -===== PKI Realms +===== PKI realms PKI realms support mapping users to roles, but you cannot map groups as the PKI realm has no notion of a group. diff --git a/x-pack/docs/en/security/authorization.asciidoc b/x-pack/docs/en/security/authorization/overview.asciidoc similarity index 96% rename from x-pack/docs/en/security/authorization.asciidoc rename to x-pack/docs/en/security/authorization/overview.asciidoc index 4a3ffe399de1b..9dc8185db4d34 100644 --- a/x-pack/docs/en/security/authorization.asciidoc +++ b/x-pack/docs/en/security/authorization/overview.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[authorization]] -== Configuring Role-based Access Control +== Configuring role-based access control {security} introduces the concept of _authorization_ to {es}. Authorization is the process of determining whether the user behind an incoming @@ -8,7 +9,7 @@ successfully authenticated and the user behind the request is identified. [[roles]] [float] -=== Roles, Permissions and Privileges +=== Roles, permissions, and privileges The authorization process revolves around the following 5 constructs: @@ -49,7 +50,7 @@ then assign users to the roles. These can be assigned to users in a number of ways depending on the realms by which the users are authenticated.
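The constructs described above can be pictured as roles wrapping sets of privileges over index patterns. The following toy model illustrates the containment-plus-wildcard check; it is not the actual {es} permission implementation:

[source,java]
----
import java.util.List;
import java.util.Set;
import java.util.regex.Pattern;

/** Toy model of a role granting privileges over index patterns. */
public class RoleCheckSketch {

    record IndexPermission(Set<String> privileges, List<String> indexPatterns) {
        boolean grants(String privilege, String index) {
            return (privileges.contains(privilege) || privileges.contains("all"))
                    && indexPatterns.stream().anyMatch(p -> matches(p, index));
        }

        // Translate the usual `*` wildcard into a regex over the index name.
        private static boolean matches(String pattern, String index) {
            return Pattern.matches(Pattern.quote(pattern).replace("*", "\\E.*\\Q"), index);
        }
    }

    public static void main(String[] args) {
        IndexPermission events = new IndexPermission(Set.of("read"), List.of("events-*"));
        System.out.println(events.grants("read", "events-2018.05.04"));  // true
        System.out.println(events.grants("write", "events-2018.05.04")); // false
    }
}
----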
[[built-in-roles]] -=== Built-in Roles +=== Built-in roles {security} applies a default role to all users, including <>. The default role enables users to access @@ -164,7 +165,7 @@ stats. [[defining-roles]] -=== Defining Roles +=== Defining roles A role is defined by the following JSON structure: @@ -276,14 +277,14 @@ see <>. [float] [[roles-management-ui]] -=== Role Management UI +=== Role management UI {security} enables you to easily manage users and roles from within {kib}. To manage roles, log in to {kib} and go to *Management / Elasticsearch / Roles*. [float] [[roles-management-api]] -=== Role Management API +=== Role management API The _Role Management APIs_ enable you to add, update, remove and retrieve roles dynamically. When you use the APIs to manage roles in the `native` realm, the @@ -292,10 +293,10 @@ see {ref}/security-api-roles.html[Role Management APIs]. [float] [[roles-management-file]] -=== File-based Role Management +=== File-based role management Apart from the _Role Management APIs_, roles can also be defined in a local -`roles.yml` file located in `CONFIG_DIR/x-pack`. This is a YAML file where each +`roles.yml` file located in `CONFIG_DIR`. This is a YAML file where each role definition is keyed by its name. [IMPORTANT] @@ -338,12 +339,12 @@ click_admins: {security} continuously monitors the `roles.yml` file and automatically picks up and applies any changes to it. -include::authorization/alias-privileges.asciidoc[] +include::alias-privileges.asciidoc[] -include::authorization/mapping-roles.asciidoc[] +include::mapping-roles.asciidoc[] -include::authorization/field-and-document-access-control.asciidoc[] +include::field-and-document-access-control.asciidoc[] -include::authorization/run-as-privilege.asciidoc[] +include::run-as-privilege.asciidoc[] -include::authorization/custom-roles-provider.asciidoc[] +include::custom-roles-provider.asciidoc[] diff --git a/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc b/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc index e246f2b194281..93d11c0ab2af9 100644 --- a/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc +++ b/x-pack/docs/en/security/authorization/run-as-privilege.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[run-as-privilege]] -=== Submitting Requests on Behalf of Other Users +=== Submitting requests on behalf of other users {security} supports a permission that enables an authenticated user to submit requests on behalf of other users.
If your application already authenticates diff --git a/x-pack/docs/en/security/ccs-clients-integrations.asciidoc b/x-pack/docs/en/security/ccs-clients-integrations.asciidoc index e25586dfb371c..cbf4ede328e48 100644 --- a/x-pack/docs/en/security/ccs-clients-integrations.asciidoc +++ b/x-pack/docs/en/security/ccs-clients-integrations.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[ccs-clients-integrations]] -== Cross Cluster Search, Clients and Integrations +== Cross cluster search, clients, and integrations When using {ref}/modules-cross-cluster-search.html[Cross Cluster Search] you need to take extra steps to secure communications with the connected diff --git a/x-pack/docs/en/security/getting-started.asciidoc b/x-pack/docs/en/security/getting-started.asciidoc index 8aa35a9428160..b8f1183cddf89 100644 --- a/x-pack/docs/en/security/getting-started.asciidoc +++ b/x-pack/docs/en/security/getting-started.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[security-getting-started]] -== Getting Started with Security +== Getting started with security To secure a cluster, you must enable {security} on every node in the cluster. Basic authentication is enabled by default--to communicate diff --git a/x-pack/docs/en/security/how-security-works.asciidoc b/x-pack/docs/en/security/how-security-works.asciidoc index ae402dfe05eb4..dcc152c2bcaab 100644 --- a/x-pack/docs/en/security/how-security-works.asciidoc +++ b/x-pack/docs/en/security/how-security-works.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[how-security-works]] -== How Security Works +== How security works An Elasticsearch cluster is typically made out of many moving parts. There are the Elasticsearch nodes that form the cluster, and often Logstash instances, @@ -64,7 +65,7 @@ For more information on user authentication see <> [float] -=== Node/Client Authentication and Channel Encryption +=== Node/client authentication and channel encryption {security} supports configuring SSL/TLS for securing the communication channels to, from and within the cluster. 
This support accounts for: diff --git a/x-pack/docs/en/security/index.asciidoc b/x-pack/docs/en/security/index.asciidoc index 188353d01a3fb..d5f970a3fb826 100644 --- a/x-pack/docs/en/security/index.asciidoc +++ b/x-pack/docs/en/security/index.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[xpack-security]] -= Securing the Elastic Stack += Securing the {stack} [partintro] -- @@ -100,7 +101,7 @@ include::how-security-works.asciidoc[] include::authentication/overview.asciidoc[] -include::authorization.asciidoc[] +include::authorization/overview.asciidoc[] include::auditing.asciidoc[] diff --git a/x-pack/docs/en/security/limitations.asciidoc b/x-pack/docs/en/security/limitations.asciidoc index c2616ac6565bd..c127ee3d7967c 100644 --- a/x-pack/docs/en/security/limitations.asciidoc +++ b/x-pack/docs/en/security/limitations.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[security-limitations]] == Security Limitations diff --git a/x-pack/docs/en/security/reference.asciidoc b/x-pack/docs/en/security/reference.asciidoc index 90668651b5d50..21138138cfbf9 100644 --- a/x-pack/docs/en/security/reference.asciidoc +++ b/x-pack/docs/en/security/reference.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[security-reference]] == Reference * <> diff --git a/x-pack/docs/en/security/reference/files.asciidoc b/x-pack/docs/en/security/reference/files.asciidoc index cec8f9d1a3bcc..dcf673d9a9f26 100644 --- a/x-pack/docs/en/security/reference/files.asciidoc +++ b/x-pack/docs/en/security/reference/files.asciidoc @@ -3,7 +3,7 @@ The {security} uses the following files: -* `CONFIG_DIR/x-pack/roles.yml` defines the roles in use on the cluster +* `CONFIG_DIR/roles.yml` defines the roles in use on the cluster (read more <>). * `CONFIG_DIR/elasticsearch-users` defines the users and their hashed passwords for @@ -12,12 +12,12 @@ The {security} uses the following files: * `CONFIG_DIR/elasticsearch-users_roles` defines the user roles assignment for the <>. -* `CONFIG_DIR/x-pack/role_mapping.yml` defines the role assignments for a +* `CONFIG_DIR/role_mapping.yml` defines the role assignments for a Distinguished Name (DN) to a role. This allows for LDAP and Active Directory groups and users and PKI users to be mapped to roles (read more <>). -* `CONFIG_DIR/x-pack/log4j2.properties` contains audit information (read more +* `CONFIG_DIR/log4j2.properties` contains audit information (read more <>). [[security-files-location]] diff --git a/x-pack/docs/en/security/securing-communications.asciidoc b/x-pack/docs/en/security/securing-communications.asciidoc index e876ce9160b86..ef07f0113cb59 100644 --- a/x-pack/docs/en/security/securing-communications.asciidoc +++ b/x-pack/docs/en/security/securing-communications.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[encrypting-communications]] -== Encrypting Communications +== Encrypting communications Elasticsearch nodes store data that may be confidential. Attacks on the data may come from the network. These attacks could include sniffing of the data, @@ -21,7 +22,7 @@ include::securing-communications/setting-up-ssl.asciidoc[] //TO-DO: These sections can be removed when all links to them are removed. [[ciphers]] -=== Enabling Cipher Suites for Stronger Encryption +=== Enabling cipher suites for stronger encryption See {ref}/ciphers.html[Enabling Cipher Suites for Stronger Encryption].
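On the cipher-suite point: which suites a node can offer is a property of the JVM, and the standard JSSE API is enough to inspect and narrow them. A small sketch (the AES-256-GCM filter is only an example policy):

[source,java]
----
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLEngine;
import java.util.Arrays;

/** List the JVM's TLS cipher suites and keep only a stronger subset. */
public class CipherSuitesDemo {
    public static void main(String[] args) throws Exception {
        SSLEngine engine = SSLContext.getDefault().createSSLEngine();

        System.out.println("Enabled by default:");
        Arrays.stream(engine.getEnabledCipherSuites()).forEach(s -> System.out.println("  " + s));

        // Restrict to AES-256-GCM suites only. Filtering from the supported
        // list keeps setEnabledCipherSuites from rejecting unknown names.
        String[] stronger = Arrays.stream(engine.getSupportedCipherSuites())
                .filter(s -> s.contains("AES_256_GCM"))
                .toArray(String[]::new);
        engine.setEnabledCipherSuites(stronger);
        System.out.println("Restricted to: " + Arrays.toString(stronger));
    }
}
----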
diff --git a/x-pack/docs/en/security/securing-communications/configuring-tls-docker.asciidoc b/x-pack/docs/en/security/securing-communications/configuring-tls-docker.asciidoc index affac534b6f01..d93d4e523d9f2 100644 --- a/x-pack/docs/en/security/securing-communications/configuring-tls-docker.asciidoc +++ b/x-pack/docs/en/security/securing-communications/configuring-tls-docker.asciidoc @@ -41,7 +41,7 @@ instances: `.env`: [source,yaml] ---- -CERTS_DIR=/usr/share/elasticsearch/config/x-pack/certificates <1> +CERTS_DIR=/usr/share/elasticsearch/config/certificates <1> ELASTIC_PASSWORD=PleaseChangeMe <2> ---- <1> The path, inside the Docker image, where certificates are expected to be found. @@ -66,18 +66,18 @@ services: image: docker.elastic.co/elasticsearch/elasticsearch-platinum:{version} command: > bash -c ' - if [[ ! -d config/x-pack/certificates/certs ]]; then - mkdir config/x-pack/certificates/certs; + if [[ ! -d config/certificates/certs ]]; then + mkdir config/certificates/certs; fi; if [[ ! -f /local/certs/bundle.zip ]]; then - bin/elasticsearch-certgen --silent --in config/x-pack/certificates/instances.yml --out config/x-pack/certificates/certs/bundle.zip; - unzip config/x-pack/certificates/certs/bundle.zip -d config/x-pack/certificates/certs; <1> + bin/elasticsearch-certgen --silent --in config/certificates/instances.yml --out config/certificates/certs/bundle.zip; + unzip config/certificates/certs/bundle.zip -d config/certificates/certs; <1> fi; - chgrp -R 0 config/x-pack/certificates/certs + chgrp -R 0 config/certificates/certs ' user: $\{UID:-1000\} working_dir: /usr/share/elasticsearch - volumes: ['.:/usr/share/elasticsearch/config/x-pack/certificates'] + volumes: ['.:/usr/share/elasticsearch/config/certificates'] ---- <1> The new node certificates and CA certificate+key are placed under the local directory `certs`. @@ -184,9 +184,9 @@ WARNING: Windows users not running PowerShell will need to remove `\` and join l ---- docker exec es01 /bin/bash -c "bin/elasticsearch-setup-passwords \ auto --batch \ --Expack.ssl.certificate=x-pack/certificates/es01/es01.crt \ --Expack.ssl.certificate_authorities=x-pack/certificates/ca/ca.crt \ --Expack.ssl.key=x-pack/certificates/es01/es01.key \ +-Expack.ssl.certificate=certificates/es01/es01.crt \ +-Expack.ssl.certificate_authorities=certificates/ca/ca.crt \ +-Expack.ssl.key=certificates/es01/es01.key \ --url https://localhost:9200" ---- -- diff --git a/x-pack/docs/en/security/securing-communications/tls-http.asciidoc b/x-pack/docs/en/security/securing-communications/tls-http.asciidoc index dae088667c6fc..eb8e985a65b59 100644 --- a/x-pack/docs/en/security/securing-communications/tls-http.asciidoc +++ b/x-pack/docs/en/security/securing-communications/tls-http.asciidoc @@ -40,9 +40,9 @@ This name should match the `keystore.path` value. [source, yaml] -------------------------------------------------- xpack.security.http.ssl.enabled: true -xpack.security.http.ssl.key: /home/es/config/x-pack/node01.key <1> -xpack.security.http.ssl.certificate: /home/es/config/x-pack/node01.crt <2> -xpack.security.http.ssl.certificate_authorities: [ "/home/es/config/x-pack/ca.crt" ] <3> +xpack.security.http.ssl.key: /home/es/config/node01.key <1> +xpack.security.http.ssl.certificate: /home/es/config/node01.crt <2> +xpack.security.http.ssl.certificate_authorities: [ "/home/es/config/ca.crt" ] <3> -------------------------------------------------- <1> The full path to the node key file. This must be a location within the {es} configuration directory. 
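For context on what `certificate_authorities` means to a client: anything connecting to the HTTPS endpoint must trust the CA that signed the node certificates. Using only JDK classes, that trust can be built up roughly like this (the `ca.crt` path is illustrative):

[source,java]
----
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManagerFactory;
import java.io.FileInputStream;
import java.io.InputStream;
import java.security.KeyStore;
import java.security.cert.CertificateFactory;
import java.security.cert.X509Certificate;

/** Build an SSLContext that trusts a single PEM-encoded CA certificate. */
public class CaTrustDemo {
    public static void main(String[] args) throws Exception {
        X509Certificate ca;
        try (InputStream in = new FileInputStream("ca.crt")) { // example path
            ca = (X509Certificate) CertificateFactory.getInstance("X.509").generateCertificate(in);
        }

        // An empty, in-memory keystore holding just the CA entry.
        KeyStore trustStore = KeyStore.getInstance(KeyStore.getDefaultType());
        trustStore.load(null, null);
        trustStore.setCertificateEntry("es-ca", ca);

        TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
        tmf.init(trustStore);

        SSLContext ctx = SSLContext.getInstance("TLS");
        ctx.init(null, tmf.getTrustManagers(), null);
        // ctx.getSocketFactory() can now open connections to the HTTPS endpoint.
        System.out.println("Trusting CA: " + ca.getSubjectX500Principal());
    }
}
----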
diff --git a/x-pack/docs/en/security/securing-communications/tls-ldap.asciidoc b/x-pack/docs/en/security/securing-communications/tls-ldap.asciidoc index f10ced77f718a..b7f0b7d300590 100644 --- a/x-pack/docs/en/security/securing-communications/tls-ldap.asciidoc +++ b/x-pack/docs/en/security/securing-communications/tls-ldap.asciidoc @@ -29,7 +29,7 @@ xpack: order: 0 url: "ldaps://ldap.example.com:636" ssl: - certificate_authorities: [ "CONFIG_DIR/x-pack/cacert.pem" ] + certificate_authorities: [ "CONFIG_DIR/cacert.pem" ] -------------------------------------------------- The CA certificate must be PEM encoded. diff --git a/x-pack/docs/en/security/securing-communications/tls-transport.asciidoc b/x-pack/docs/en/security/securing-communications/tls-transport.asciidoc index 9bce211a1e278..2e20a20f907ef 100644 --- a/x-pack/docs/en/security/securing-communications/tls-transport.asciidoc +++ b/x-pack/docs/en/security/securing-communications/tls-transport.asciidoc @@ -43,9 +43,9 @@ This name should match the `keystore.path` value. -------------------------------------------------- xpack.security.transport.ssl.enabled: true xpack.security.transport.ssl.verification_mode: certificate <1> -xpack.security.transport.ssl.key: /home/es/config/x-pack/node01.key <2> -xpack.security.transport.ssl.certificate: /home/es/config/x-pack/node01.crt <3> -xpack.security.transport.ssl.certificate_authorities: [ "/home/es/config/x-pack/ca.crt" ] <4> +xpack.security.transport.ssl.key: /home/es/config/node01.key <2> +xpack.security.transport.ssl.certificate: /home/es/config/node01.crt <3> +xpack.security.transport.ssl.certificate_authorities: [ "/home/es/config/ca.crt" ] <4> -------------------------------------------------- <1> If you used the `--dns` or `--ip` options with the `elasticsearch-certutil cert` command and you want to enable strict hostname checking, set the verification mode to diff --git a/x-pack/docs/en/security/troubleshooting.asciidoc b/x-pack/docs/en/security/troubleshooting.asciidoc index e805ed07a7dec..c202ed9dbedb0 100644 --- a/x-pack/docs/en/security/troubleshooting.asciidoc +++ b/x-pack/docs/en/security/troubleshooting.asciidoc @@ -1,3 +1,4 @@ +[role="xpack"] [[security-troubleshooting]] == {security} Troubleshooting ++++ diff --git a/x-pack/docs/en/security/using-ip-filtering.asciidoc b/x-pack/docs/en/security/using-ip-filtering.asciidoc index 37beced5a9455..817975c69de9d 100644 --- a/x-pack/docs/en/security/using-ip-filtering.asciidoc +++ b/x-pack/docs/en/security/using-ip-filtering.asciidoc @@ -1,5 +1,6 @@ +[role="xpack"] [[ip-filtering]] -== Restricting Connections with IP Filtering +== Restricting connections with IP filtering You can apply IP filtering to application clients, node clients, or transport clients, in addition to other nodes that are attempting to join the cluster. @@ -92,7 +93,7 @@ transport.profiles.client.xpack.security.filter.deny: _all NOTE: When you do not specify a profile, `default` is used automatically. [float] -=== HTTP Filtering +=== HTTP filtering You may want to have different IP filtering for the transport and HTTP protocols.
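A rough model of how such allow and deny rules combine: allow rules are consulted first and take precedence, and `_all` matches every address. This sketch handles only literal IPv4 matches plus `_all`; the real filter also understands CIDR masks and hostnames:

[source,java]
----
import java.util.List;

/** Simplified model of allow/deny IP filtering with `_all` support. */
public class IpFilterSketch {

    private final List<String> allow;
    private final List<String> deny;

    IpFilterSketch(List<String> allow, List<String> deny) {
        this.allow = allow;
        this.deny = deny;
    }

    boolean accepts(String ip) {
        // A matching allow rule short-circuits any deny rule.
        if (allow.stream().anyMatch(r -> matches(r, ip))) {
            return true;
        }
        return deny.stream().noneMatch(r -> matches(r, ip));
    }

    private static boolean matches(String rule, String ip) {
        return "_all".equals(rule) || rule.equals(ip);
    }

    public static void main(String[] args) {
        IpFilterSketch filter = new IpFilterSketch(List.of("192.168.0.1"), List.of("_all"));
        System.out.println(filter.accepts("192.168.0.1")); // true: explicitly allowed
        System.out.println(filter.accepts("10.0.0.9"));    // false: denied by _all
    }
}
----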
@@ -106,7 +107,7 @@ xpack.security.http.filter.deny: _all [float] [[dynamic-ip-filtering]] -==== Dynamically updating ip filter settings +==== Dynamically updating IP filter settings When running in an environment with highly dynamic IP addresses, like cloud-based hosting, it is very hard to know the IP addresses upfront when provisioning diff --git a/x-pack/docs/en/setup/bootstrap-checks-xes.asciidoc b/x-pack/docs/en/setup/bootstrap-checks-xes.asciidoc index a9150ec056c1e..6ee9c29b44ff3 100644 --- a/x-pack/docs/en/setup/bootstrap-checks-xes.asciidoc +++ b/x-pack/docs/en/setup/bootstrap-checks-xes.asciidoc @@ -43,7 +43,7 @@ to each user. If you use files to manage the role mappings, you must configure a YAML file and copy it to each node in the cluster. By default, role mappings are stored in -`ES_PATH_CONF/x-pack/role_mapping.yml`. Alternatively, you can specify a +`ES_PATH_CONF/role_mapping.yml`. Alternatively, you can specify a different role mapping file for each type of realm and specify its location in the `elasticsearch.yml` file. For more information, see {xpack-ref}/mapping-roles.html#mapping-roles-file[Using Role Mapping Files]. diff --git a/x-pack/docs/en/watcher/trigger/schedule/cron.asciidoc b/x-pack/docs/en/watcher/trigger/schedule/cron.asciidoc index c24668a688dca..57d330510971d 100644 --- a/x-pack/docs/en/watcher/trigger/schedule/cron.asciidoc +++ b/x-pack/docs/en/watcher/trigger/schedule/cron.asciidoc @@ -213,7 +213,7 @@ minute during the weekend: {xpack} ships with an `elasticsearch-croneval` command-line tool that you can use to verify that your cron expressions are valid and produce the expected results. This tool is -provided in the `$ES_HOME/bin/x-pack` directory. +provided in the `$ES_HOME/bin` directory. To verify a cron expression, simply pass it in as a parameter to `elasticsearch-croneval`: diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLDriver.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLDriver.java index a44d39a0d7a56..c143978468dfd 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLDriver.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/nio/SSLDriver.java @@ -113,7 +113,13 @@ public ByteBuffer getNetworkReadBuffer() { } public void read(InboundChannelBuffer buffer) throws SSLException { - currentMode.read(buffer); + Mode modePriorToRead; + do { + modePriorToRead = currentMode; + currentMode.read(buffer); + // If we switched modes we want to read again as there might be unhandled bytes that need to be
+ } while (modePriorToRead != currentMode); } public boolean readyForApplicationWrites() { @@ -365,8 +371,9 @@ public void read(InboundChannelBuffer buffer) throws SSLException { try { SSLEngineResult result = unwrap(buffer); handshakeStatus = result.getHandshakeStatus(); - continueUnwrap = result.bytesConsumed() > 0; handshake(); + // If we are done handshaking we should exit the handshake read + continueUnwrap = result.bytesConsumed() > 0 && currentMode.isHandshake(); } catch (SSLException e) { closingInternal(); throw e; diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcConnection.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcConnection.java index 17f8973cea386..0eb1888487cf1 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcConnection.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcConnection.java @@ -43,6 +43,10 @@ public class JdbcConnection implements Connection, JdbcWrapper { private String catalog; private String schema; + /** + * The SQLException is the only type of Exception the JDBC API can throw (and that the user expects). + * If we remove it, we need to make sure no other types of Exceptions (runtime or otherwise) are thrown + */ public JdbcConnection(JdbcConfiguration connectionInfo) throws SQLException { cfg = connectionInfo; client = new JdbcHttpClient(connectionInfo); @@ -428,4 +432,4 @@ int esInfoMajorVersion() throws SQLException { int esInfoMinorVersion() throws SQLException { return client.serverInfo().minorVersion; } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcStatement.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcStatement.java index fab21c541799e..c773dd5d17dc1 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcStatement.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/JdbcStatement.java @@ -7,7 +7,7 @@ import org.elasticsearch.xpack.sql.jdbc.net.client.Cursor; import org.elasticsearch.xpack.sql.jdbc.net.client.RequestMeta; -import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import java.sql.Connection; import java.sql.ResultSet; @@ -220,7 +220,7 @@ public int getFetchSize() throws SQLException { // unset (in this case -1 which the user cannot set) - in this case, the default fetch size is returned // 0 meaning the hint is disabled (the user has called setFetch) // >0 means actual hint - + // tl;dr - unless the user set it, returning the default is fine return requestMeta.fetchSize(); } @@ -402,4 +402,4 @@ final void resultSetWasClosed() throws SQLException { close(); } } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/PreparedQuery.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/PreparedQuery.java index 4aaf337f2b772..06825ee6e3f96 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/PreparedQuery.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/jdbc/PreparedQuery.java @@ -6,7 +6,7 @@ package org.elasticsearch.xpack.sql.jdbc.jdbc; import org.elasticsearch.xpack.sql.jdbc.JdbcSQLException; -import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import 
org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import org.elasticsearch.xpack.sql.type.DataType; import java.sql.JDBCType; @@ -73,7 +73,7 @@ String sql() { */ List params() { return Arrays.stream(this.params).map( - paramInfo -> new SqlTypedParamValue(paramInfo.value, DataType.fromJdbcType(paramInfo.type)) + paramInfo -> new SqlTypedParamValue(DataType.fromJdbcType(paramInfo.type), paramInfo.value) ).collect(Collectors.toList()); } @@ -86,4 +86,4 @@ public String toString() { static PreparedQuery prepare(String sql) throws SQLException { return new PreparedQuery(sql, SqlQueryParameterAnalyzer.parametersCount(sql)); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/JdbcHttpClient.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/JdbcHttpClient.java index ab4cdff985863..89ee78e0bae9e 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/JdbcHttpClient.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/JdbcHttpClient.java @@ -5,22 +5,21 @@ */ package org.elasticsearch.xpack.sql.jdbc.net.client; -import org.elasticsearch.action.main.MainResponse; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.xpack.sql.client.HttpClient; import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration; import org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfo; import org.elasticsearch.xpack.sql.jdbc.net.protocol.InfoResponse; -import org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest; -import org.elasticsearch.xpack.sql.plugin.AbstractSqlRequest; -import org.elasticsearch.xpack.sql.plugin.SqlQueryRequest; -import org.elasticsearch.xpack.sql.plugin.SqlQueryResponse; -import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.proto.Mode; +import org.elasticsearch.xpack.sql.proto.Protocol; +import org.elasticsearch.xpack.sql.proto.SqlQueryRequest; +import org.elasticsearch.xpack.sql.proto.MainResponse; +import org.elasticsearch.xpack.sql.proto.SqlQueryResponse; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import java.sql.SQLException; import java.util.List; -import java.util.TimeZone; import java.util.stream.Collectors; import static org.elasticsearch.xpack.sql.client.shared.StringUtils.EMPTY; @@ -34,6 +33,10 @@ public class JdbcHttpClient { private final JdbcConfiguration conCfg; private InfoResponse serverInfo; + /** + * The SQLException is the only type of Exception the JDBC API can throw (and that the user expects). + * If we remove it, we need to make sure no other types of Exceptions (runtime or otherwise) are thrown + */ public JdbcHttpClient(JdbcConfiguration conCfg) throws SQLException { httpClient = new HttpClient(conCfg); this.conCfg = conCfg; @@ -45,9 +48,9 @@ public boolean ping(long timeoutInMs) throws SQLException { public Cursor query(String sql, List params, RequestMeta meta) throws SQLException { int fetch = meta.fetchSize() > 0 ? 
meta.fetchSize() : conCfg.pageSize(); - SqlQueryRequest sqlRequest = new SqlQueryRequest(AbstractSqlRequest.Mode.JDBC, sql, params, null, - AbstractSqlQueryRequest.DEFAULT_TIME_ZONE, - fetch, TimeValue.timeValueMillis(meta.timeoutInMs()), TimeValue.timeValueMillis(meta.queryTimeoutInMs()), ""); + SqlQueryRequest sqlRequest = new SqlQueryRequest(Mode.JDBC, sql, params, null, + Protocol.TIME_ZONE, + fetch, TimeValue.timeValueMillis(meta.timeoutInMs()), TimeValue.timeValueMillis(meta.queryTimeoutInMs())); SqlQueryResponse response = httpClient.query(sqlRequest); return new DefaultCursor(this, response.cursor(), toJdbcColumnInfo(response.columns()), response.rows(), meta); } @@ -57,10 +60,8 @@ public Cursor query(String sql, List params, RequestMeta met * the scroll id to use to fetch the next page. */ public Tuple>> nextPage(String cursor, RequestMeta meta) throws SQLException { - SqlQueryRequest sqlRequest = new SqlQueryRequest().cursor(cursor); - sqlRequest.mode(AbstractSqlRequest.Mode.JDBC); - sqlRequest.requestTimeout(TimeValue.timeValueMillis(meta.timeoutInMs())); - sqlRequest.pageTimeout(TimeValue.timeValueMillis(meta.queryTimeoutInMs())); + SqlQueryRequest sqlRequest = new SqlQueryRequest(Mode.JDBC, cursor, TimeValue.timeValueMillis(meta.timeoutInMs()), + TimeValue.timeValueMillis(meta.queryTimeoutInMs())); SqlQueryResponse response = httpClient.query(sqlRequest); return new Tuple<>(response.cursor(), response.rows()); } @@ -78,13 +79,13 @@ public InfoResponse serverInfo() throws SQLException { private InfoResponse fetchServerInfo() throws SQLException { MainResponse mainResponse = httpClient.serverInfo(); - return new InfoResponse(mainResponse.getClusterName().value(), mainResponse.getVersion().major, mainResponse.getVersion().minor); + return new InfoResponse(mainResponse.getClusterName(), mainResponse.getVersion().major, mainResponse.getVersion().minor); } /** * Converts REST column metadata into JDBC column metadata */ - private List toJdbcColumnInfo(List columns) { + private List toJdbcColumnInfo(List columns) { return columns.stream().map(columnInfo -> new ColumnInfo(columnInfo.name(), columnInfo.jdbcType(), EMPTY, EMPTY, EMPTY, EMPTY, columnInfo.displaySize()) ).collect(Collectors.toList()); diff --git a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverterTests.java b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverterTests.java index 612c46fbe56ef..dc4ba9fa244b5 100644 --- a/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverterTests.java +++ b/x-pack/plugin/sql/jdbc/src/test/java/org/elasticsearch/xpack/sql/jdbc/jdbc/TypeConverterTests.java @@ -10,8 +10,8 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.sql.plugin.AbstractSqlRequest; import org.elasticsearch.xpack.sql.plugin.SqlQueryResponse; +import org.elasticsearch.xpack.sql.proto.Mode; import org.joda.time.DateTime; import java.sql.JDBCType; @@ -51,7 +51,7 @@ private Object convertAsNative(Object value, JDBCType type) throws Exception { XContentBuilder builder = JsonXContent.contentBuilder(); builder.startObject(); builder.field("value"); - SqlQueryResponse.value(builder, AbstractSqlRequest.Mode.JDBC, value); + SqlQueryResponse.value(builder, Mode.JDBC, value); builder.endObject(); builder.close(); Object copy = XContentHelper.convertToMap(BytesReference.bytes(builder), 
false, builder.contentType()).v2().get("value"); diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/CliSession.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/CliSession.java index 64f38c2254c5f..8e030f36dd042 100644 --- a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/CliSession.java +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/CliSession.java @@ -5,11 +5,12 @@ */ package org.elasticsearch.xpack.sql.cli.command; -import org.elasticsearch.action.main.MainResponse; import org.elasticsearch.xpack.sql.client.HttpClient; import org.elasticsearch.xpack.sql.client.shared.ClientException; import org.elasticsearch.xpack.sql.client.shared.Version; import org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest; +import org.elasticsearch.xpack.sql.proto.MainResponse; +import org.elasticsearch.xpack.sql.proto.Protocol; import java.sql.SQLException; @@ -18,7 +19,7 @@ */ public class CliSession { private final HttpClient httpClient; - private int fetchSize = AbstractSqlQueryRequest.DEFAULT_FETCH_SIZE; + private int fetchSize = Protocol.FETCH_SIZE; private String fetchSeparator = ""; private boolean debug; diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommand.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommand.java index 635c041da7ae6..e637386f9798f 100644 --- a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommand.java +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommand.java @@ -5,8 +5,8 @@ */ package org.elasticsearch.xpack.sql.cli.command; -import org.elasticsearch.action.main.MainResponse; import org.elasticsearch.xpack.sql.cli.CliTerminal; +import org.elasticsearch.xpack.sql.proto.MainResponse; import java.sql.SQLException; import java.util.Locale; @@ -30,7 +30,7 @@ public boolean doHandle(CliTerminal terminal, CliSession cliSession, String line } terminal.line() .text("Node:").em(info.getNodeName()) - .text(" Cluster:").em(info.getClusterName().value()) + .text(" Cluster:").em(info.getClusterName()) .text(" Version:").em(info.getVersion().toString()) .ln(); return true; diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommand.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommand.java index c1fc609c50b8f..aa8bc499cd29e 100644 --- a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommand.java +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommand.java @@ -9,7 +9,7 @@ import org.elasticsearch.xpack.sql.client.HttpClient; import org.elasticsearch.xpack.sql.client.shared.JreHttpUrlConnection; import org.elasticsearch.xpack.sql.plugin.CliFormatter; -import org.elasticsearch.xpack.sql.plugin.SqlQueryResponse; +import org.elasticsearch.xpack.sql.proto.SqlQueryResponse; import java.sql.SQLException; @@ -23,8 +23,8 @@ protected boolean doHandle(CliTerminal terminal, CliSession cliSession, String l String data; try { response = cliClient.queryInit(line, cliSession.getFetchSize()); - cliFormatter = new CliFormatter(response); - data = cliFormatter.formatWithHeader(response); + cliFormatter = new CliFormatter(response.columns(), 
response.rows()); + data = cliFormatter.formatWithHeader(response.columns(), response.rows()); while (true) { handleText(terminal, data); if (response.cursor().isEmpty()) { @@ -36,7 +36,7 @@ protected boolean doHandle(CliTerminal terminal, CliSession cliSession, String l terminal.println(cliSession.getFetchSeparator()); } response = cliSession.getClient().nextPage(response.cursor()); - data = cliFormatter.formatWithoutHeader(response); + data = cliFormatter.formatWithoutHeader(response.rows()); } } catch (SQLException e) { if (JreHttpUrlConnection.SQL_STATE_BAD_SERVER.equals(e.getSQLState())) { diff --git a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliSessionTests.java b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliSessionTests.java index befcddf9e7d25..e5643ad443a59 100644 --- a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliSessionTests.java +++ b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliSessionTests.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.sql.cli; import org.elasticsearch.Build; -import org.elasticsearch.action.main.MainResponse; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.common.UUIDs; import org.elasticsearch.test.ESTestCase; @@ -14,6 +13,7 @@ import org.elasticsearch.xpack.sql.client.HttpClient; import org.elasticsearch.xpack.sql.client.shared.ClientException; import org.elasticsearch.xpack.sql.client.shared.Version; +import org.elasticsearch.xpack.sql.proto.MainResponse; import java.sql.SQLException; @@ -28,7 +28,7 @@ public class CliSessionTests extends ESTestCase { public void testProperConnection() throws Exception { HttpClient httpClient = mock(HttpClient.class); when(httpClient.serverInfo()).thenReturn(new MainResponse(randomAlphaOfLength(5), org.elasticsearch.Version.CURRENT, - ClusterName.DEFAULT, UUIDs.randomBase64UUID(), Build.CURRENT)); + ClusterName.DEFAULT.value(), UUIDs.randomBase64UUID(), Build.CURRENT)); CliSession cliSession = new CliSession(httpClient); cliSession.checkConnection(); verify(httpClient, times(1)).serverInfo(); @@ -58,10 +58,10 @@ public void testWrongServerVersion() throws Exception { } when(httpClient.serverInfo()).thenReturn(new MainResponse(randomAlphaOfLength(5), org.elasticsearch.Version.fromString(major + "." 
+ minor + ".23"), - ClusterName.DEFAULT, UUIDs.randomBase64UUID(), Build.CURRENT)); + ClusterName.DEFAULT.value(), UUIDs.randomBase64UUID(), Build.CURRENT)); CliSession cliSession = new CliSession(httpClient); expectThrows(ClientException.class, cliSession::checkConnection); verify(httpClient, times(1)).serverInfo(); verifyNoMoreInteractions(httpClient); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommandTests.java b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommandTests.java index 567cd10531d71..e99cb2fb7f7e2 100644 --- a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommandTests.java +++ b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommandTests.java @@ -6,12 +6,12 @@ package org.elasticsearch.xpack.sql.cli.command; import org.elasticsearch.Build; -import org.elasticsearch.action.main.MainResponse; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.common.UUIDs; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.cli.TestTerminal; import org.elasticsearch.xpack.sql.client.HttpClient; +import org.elasticsearch.xpack.sql.proto.MainResponse; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; @@ -36,7 +36,7 @@ public void testShowInfo() throws Exception { HttpClient client = mock(HttpClient.class); CliSession cliSession = new CliSession(client); when(client.serverInfo()).thenReturn(new MainResponse("my_node", org.elasticsearch.Version.fromString("1.2.3"), - new ClusterName("my_cluster"), UUIDs.randomBase64UUID(), Build.CURRENT)); + new ClusterName("my_cluster").value(), UUIDs.randomBase64UUID(), Build.CURRENT)); ServerInfoCliCommand cliCommand = new ServerInfoCliCommand(); assertTrue(cliCommand.handle(testTerminal, cliSession, "info")); assertEquals(testTerminal.toString(), "Node:my_node Cluster:my_cluster Version:1.2.3\n"); @@ -44,4 +44,4 @@ public void testShowInfo() throws Exception { verifyNoMoreInteractions(client); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommandTests.java b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommandTests.java index 4385731313aaf..86ebfa52fe49f 100644 --- a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommandTests.java +++ b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommandTests.java @@ -8,8 +8,8 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.cli.TestTerminal; import org.elasticsearch.xpack.sql.client.HttpClient; -import org.elasticsearch.xpack.sql.plugin.ColumnInfo; -import org.elasticsearch.xpack.sql.plugin.SqlQueryResponse; +import org.elasticsearch.xpack.sql.proto.ColumnInfo; +import org.elasticsearch.xpack.sql.proto.SqlQueryResponse; import java.sql.JDBCType; import java.sql.SQLException; @@ -119,4 +119,4 @@ private SqlQueryResponse fakeResponse(String cursor, boolean includeColumns, Str } return new SqlQueryResponse(cursor, columns, rows); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/AbstractSqlQueryRequest.java 
b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/AbstractSqlQueryRequest.java index 8969b88161935..8d34d59c1e0af 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/AbstractSqlQueryRequest.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/AbstractSqlQueryRequest.java @@ -10,12 +10,17 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.sql.proto.Mode; +import org.elasticsearch.xpack.sql.proto.Protocol; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.type.DataType; import java.io.IOException; import java.util.Collections; @@ -28,20 +33,12 @@ * Base class for requests that contain sql queries (Query and Translate) */ public abstract class AbstractSqlQueryRequest extends AbstractSqlRequest implements CompositeIndicesRequest, ToXContentFragment { - public static final TimeZone DEFAULT_TIME_ZONE = TimeZone.getTimeZone("UTC"); - - /** - * Global choice for the default fetch size. - */ - public static final int DEFAULT_FETCH_SIZE = 1000; - public static final TimeValue DEFAULT_REQUEST_TIMEOUT = TimeValue.timeValueSeconds(90); - public static final TimeValue DEFAULT_PAGE_TIMEOUT = TimeValue.timeValueSeconds(45); private String query = ""; - private TimeZone timeZone = DEFAULT_TIME_ZONE; - private int fetchSize = DEFAULT_FETCH_SIZE; - private TimeValue requestTimeout = DEFAULT_REQUEST_TIMEOUT; - private TimeValue pageTimeout = DEFAULT_PAGE_TIMEOUT; + private TimeZone timeZone = Protocol.TIME_ZONE; + private int fetchSize = Protocol.FETCH_SIZE; + private TimeValue requestTimeout = Protocol.REQUEST_TIMEOUT; + private TimeValue pageTimeout = Protocol.PAGE_TIMEOUT; @Nullable private QueryBuilder filter = null; private List params = Collections.emptyList(); @@ -69,11 +66,10 @@ protected static ObjectParser objec parser.declareObjectArray(AbstractSqlQueryRequest::params, (p, c) -> SqlTypedParamValue.fromXContent(p), new ParseField("params")); parser.declareString((request, zoneId) -> request.timeZone(TimeZone.getTimeZone(zoneId)), new ParseField("time_zone")); parser.declareInt(AbstractSqlQueryRequest::fetchSize, new ParseField("fetch_size")); + parser.declareString((request, timeout) -> request.requestTimeout(TimeValue.parseTimeValue(timeout, Protocol.REQUEST_TIMEOUT, + "request_timeout")), new ParseField("request_timeout")); parser.declareString( - (request, timeout) -> request.requestTimeout(TimeValue.parseTimeValue(timeout, DEFAULT_REQUEST_TIMEOUT, "request_timeout")), - new ParseField("request_timeout")); - parser.declareString( - (request, timeout) -> request.pageTimeout(TimeValue.parseTimeValue(timeout, DEFAULT_PAGE_TIMEOUT, "page_timeout")), + (request, timeout) -> request.pageTimeout(TimeValue.parseTimeValue(timeout, Protocol.PAGE_TIMEOUT, "page_timeout")), new ParseField("page_timeout")); parser.declareObject(AbstractSqlQueryRequest::filter, (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), new ParseField("filter")); @@ 
-185,7 +181,7 @@ public QueryBuilder filter() { public AbstractSqlQueryRequest(StreamInput in) throws IOException { super(in); query = in.readString(); - params = in.readList(SqlTypedParamValue::new); + params = in.readList(AbstractSqlQueryRequest::readSqlTypedParamValue); timeZone = TimeZone.getTimeZone(in.readString()); fetchSize = in.readVInt(); requestTimeout = in.readTimeValue(); @@ -193,11 +189,23 @@ public AbstractSqlQueryRequest(StreamInput in) throws IOException { filter = in.readOptionalNamedWriteable(QueryBuilder.class); } + public static void writeSqlTypedParamValue(StreamOutput out, SqlTypedParamValue value) throws IOException { + out.writeEnum(value.dataType); + out.writeGenericValue(value.value); + } + + public static SqlTypedParamValue readSqlTypedParamValue(StreamInput in) throws IOException { + return new SqlTypedParamValue(in.readEnum(DataType.class), in.readGenericValue()); + } + @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(query); - out.writeList(params); + out.writeVInt(params.size()); + for (SqlTypedParamValue param: params) { + writeSqlTypedParamValue(out, param); + } out.writeString(timeZone.getID()); out.writeVInt(fetchSize); out.writeTimeValue(requestTimeout); @@ -224,36 +232,4 @@ public boolean equals(Object o) { public int hashCode() { return Objects.hash(super.hashCode(), query, timeZone, fetchSize, requestTimeout, pageTimeout, filter); } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - if (query != null) { - builder.field("query", query); - } - if (this.params.isEmpty() == false) { - builder.startArray("params"); - for (SqlTypedParamValue val : this.params) { - val.toXContent(builder, params); - } - builder.endArray(); - } - if (timeZone != null) { - builder.field("time_zone", timeZone.getID()); - } - if (fetchSize != DEFAULT_FETCH_SIZE) { - builder.field("fetch_size", fetchSize); - } - if (requestTimeout != DEFAULT_REQUEST_TIMEOUT) { - builder.field("request_timeout", requestTimeout.getStringRep()); - } - if (pageTimeout != DEFAULT_PAGE_TIMEOUT) { - builder.field("page_timeout", pageTimeout.getStringRep()); - } - if (filter != null) { - builder.field("filter"); - filter.toXContent(builder, params); - } - return builder; - } - } diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/AbstractSqlRequest.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/AbstractSqlRequest.java index bc4b1e81e44b3..2cb23f796d609 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/AbstractSqlRequest.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/AbstractSqlRequest.java @@ -10,9 +10,9 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.xpack.sql.proto.Mode; import java.io.IOException; -import java.util.Locale; import java.util.Objects; import static org.elasticsearch.action.ValidateActions.addValidationError; @@ -24,24 +24,6 @@ */ public abstract class AbstractSqlRequest extends ActionRequest implements ToXContent { - public enum Mode { - PLAIN, - JDBC; - - public static Mode fromString(String mode) { - if (mode == null) { - return PLAIN; - } - return Mode.valueOf(mode.toUpperCase(Locale.ROOT)); - } - - - @Override - public String toString() { - return 
this.name().toLowerCase(Locale.ROOT); - } - } - private Mode mode = Mode.PLAIN; protected AbstractSqlRequest() { diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/CliFormatter.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/CliFormatter.java index 9d9a9ea04a487..359652fa4f203 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/CliFormatter.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/CliFormatter.java @@ -8,6 +8,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.xpack.sql.proto.ColumnInfo; import java.io.IOException; import java.util.Arrays; @@ -28,19 +29,19 @@ public class CliFormatter implements Writeable { /** * Create a new {@linkplain CliFormatter} for formatting responses similar - * to the provided {@link SqlQueryResponse}. + * to the provided columns and rows. */ - public CliFormatter(SqlQueryResponse response) { + public CliFormatter(List columns, List> rows) { // Figure out the column widths: // 1. Start with the widths of the column names - width = new int[response.columns().size()]; + width = new int[columns.size()]; for (int i = 0; i < width.length; i++) { // TODO read the width from the data type? - width[i] = Math.max(MIN_COLUMN_WIDTH, response.columns().get(i).name().length()); + width[i] = Math.max(MIN_COLUMN_WIDTH, columns.get(i).name().length()); } // 2. Expand columns to fit the largest value - for (List row : response.rows()) { + for (List row : rows) { for (int i = 0; i < width.length; i++) { // TODO are we sure toString is correct here? What about dates that come back as longs. // Tracked by https://github.com/elastic/x-pack-elasticsearch/issues/3081 @@ -62,15 +63,15 @@ public void writeTo(StreamOutput out) throws IOException { * Format the provided {@linkplain SqlQueryResponse} for the CLI * including the header lines. */ - public String formatWithHeader(SqlQueryResponse response) { + public String formatWithHeader(List columns, List> rows) { // The header lines - StringBuilder sb = new StringBuilder(estimateSize(response.rows().size() + 2)); + StringBuilder sb = new StringBuilder(estimateSize(rows.size() + 2)); for (int i = 0; i < width.length; i++) { if (i > 0) { sb.append('|'); } - String name = response.columns().get(i).name(); + String name = columns.get(i).name(); // left padding int leftPadding = (width[i] - name.length()) / 2; for (int j = 0; j < leftPadding; j++) { @@ -98,19 +99,19 @@ public String formatWithHeader(SqlQueryResponse response) { /* Now format the results. Sadly, this means that column * widths are entirely determined by the first batch of * results. */ - return formatWithoutHeader(sb, response); + return formatWithoutHeader(sb, rows); } /** * Format the provided {@linkplain SqlQueryResponse} for the CLI * without the header lines. 
*/ - public String formatWithoutHeader(SqlQueryResponse response) { - return formatWithoutHeader(new StringBuilder(estimateSize(response.rows().size())), response); + public String formatWithoutHeader(List> rows) { + return formatWithoutHeader(new StringBuilder(estimateSize(rows.size())), rows); } - private String formatWithoutHeader(StringBuilder sb, SqlQueryResponse response) { - for (List row : response.rows()) { + private String formatWithoutHeader(StringBuilder sb, List> rows) { + for (List row : rows) { for (int i = 0; i < width.length; i++) { if (i > 0) { sb.append('|'); @@ -138,7 +139,7 @@ private String formatWithoutHeader(StringBuilder sb, SqlQueryResponse response) } /** - * Pick a good estimate of the buffer size needed to contain the rows. + * Pick a good estimate of the buffer size needed to contain the rows. */ int estimateSize(int rows) { /* Each column has either a '|' or a '\n' after it diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/MetaColumnInfo.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/MetaColumnInfo.java deleted file mode 100644 index 72d5932f51137..0000000000000 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/MetaColumnInfo.java +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.sql.plugin; - -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ToXContentObject; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; - -import java.io.IOException; -import java.sql.JDBCType; -import java.util.Objects; - -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; - -/** - * Information about a column returned by the listColumns response - */ -public class MetaColumnInfo implements Writeable, ToXContentObject { - - private static final ConstructingObjectParser PARSER = - new ConstructingObjectParser<>("column_info", true, objects -> - new MetaColumnInfo( - (String) objects[0], - (String) objects[1], - (String) objects[2], - objects[3] == null ? null : JDBCType.valueOf((int) objects[3]), - objects[4] == null ? 
0 : (int) objects[4], - (int) objects[5])); - - private static final ParseField TABLE = new ParseField("table"); - private static final ParseField NAME = new ParseField("name"); - private static final ParseField ES_TYPE = new ParseField("type"); - private static final ParseField JDBC_TYPE = new ParseField("jdbc_type"); - private static final ParseField SIZE = new ParseField("size"); - private static final ParseField POSITION = new ParseField("position"); - - static { - PARSER.declareString(constructorArg(), TABLE); - PARSER.declareString(constructorArg(), NAME); - PARSER.declareString(constructorArg(), ES_TYPE); - PARSER.declareInt(optionalConstructorArg(), JDBC_TYPE); - PARSER.declareInt(optionalConstructorArg(), SIZE); - PARSER.declareInt(constructorArg(), POSITION); - } - - private final String table; - private final String name; - private final String esType; - @Nullable - private final JDBCType jdbcType; - private final int size; - private final int position; - - public MetaColumnInfo(String table, String name, String esType, JDBCType jdbcType, int size, int position) { - this.table = table; - this.name = name; - this.esType = esType; - this.jdbcType = jdbcType; - this.size = size; - this.position = position; - } - - public MetaColumnInfo(String table, String name, String esType, int position) { - this(table, name, esType, null, 0, position); - } - - MetaColumnInfo(StreamInput in) throws IOException { - table = in.readString(); - name = in.readString(); - esType = in.readString(); - if (in.readBoolean()) { - jdbcType = JDBCType.valueOf(in.readVInt()); - size = in.readVInt(); - } else { - jdbcType = null; - size = 0; - } - position = in.readVInt(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(table); - out.writeString(name); - out.writeString(esType); - if (jdbcType != null) { - out.writeBoolean(true); - out.writeVInt(jdbcType.getVendorTypeNumber()); - out.writeVInt(size); - } else { - out.writeBoolean(false); - } - out.writeVInt(position); - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field("table", table); - builder.field("name", name); - builder.field("type", esType); - if (jdbcType != null) { - builder.field("jdbc_type", jdbcType.getVendorTypeNumber()); - builder.field("size", size); - } - builder.field("position", position); - return builder.endObject(); - } - - - public static MetaColumnInfo fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - - /** - * Name of the table. - */ - public String table() { - return table; - } - - /** - * Name of the column. - */ - public String name() { - return name; - } - - /** - * The type of the column in Elasticsearch. - */ - public String esType() { - return esType; - } - - /** - * The type of the column as it would be returned by a JDBC driver. 
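
The StreamInput constructor and writeTo being deleted here (and the readColumnInfo/writeColumnInfo helpers that replace them later in SqlQueryResponse) all share one wire idiom for nullable fields: a boolean presence flag, followed by the payload only when the flag is true. Sketched in isolation, for the nullable JDBC type and its dependent size field:

    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;

    import java.io.IOException;
    import java.sql.JDBCType;

    // Write a nullable JDBCType; size only means something (and is only sent) when the type is present.
    static void writeOptionalType(StreamOutput out, JDBCType jdbcType, int size) throws IOException {
        if (jdbcType != null) {
            out.writeBoolean(true);
            out.writeVInt(jdbcType.getVendorTypeNumber());
            out.writeVInt(size);
        } else {
            out.writeBoolean(false);   // absent: nothing else goes on the wire
        }
    }

    static JDBCType readOptionalType(StreamInput in) throws IOException {
        if (in.readBoolean()) {
            JDBCType type = JDBCType.valueOf(in.readVInt());
            int size = in.readVInt(); // must still be consumed, even if the caller ignores it
            return type;
        }
        return null;
    }
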
- */ - public JDBCType jdbcType() { - return jdbcType; - } - - /** - * Precision - */ - public int size() { - return size; - } - - /** - * Column position with in the tables - */ - public int position() { - return position; - } - - @Override - public String toString() { - return Strings.toString(this); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - MetaColumnInfo that = (MetaColumnInfo) o; - return size == that.size && - position == that.position && - Objects.equals(table, that.table) && - Objects.equals(name, that.name) && - Objects.equals(esType, that.esType) && - jdbcType == that.jdbcType; - } - - @Override - public int hashCode() { - return Objects.hash(table, name, esType, jdbcType, size, position); - } - -} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorAction.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorAction.java index ed64fa2a41e57..f0b91640f981f 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorAction.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorAction.java @@ -13,7 +13,6 @@ public class SqlClearCursorAction public static final SqlClearCursorAction INSTANCE = new SqlClearCursorAction(); public static final String NAME = "indices:data/read/sql/close_cursor"; - public static final String REST_ENDPOINT = "/_xpack/sql/close"; private SqlClearCursorAction() { super(NAME); diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorRequest.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorRequest.java index 0dfb9f71e38f1..45dda28588726 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorRequest.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorRequest.java @@ -10,9 +10,9 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.sql.proto.Mode; import java.io.IOException; import java.util.Objects; @@ -23,13 +23,13 @@ /** * Request to clean all SQL resources associated with the cursor */ -public class SqlClearCursorRequest extends AbstractSqlRequest implements ToXContentObject { +public class SqlClearCursorRequest extends AbstractSqlRequest { private static final ConstructingObjectParser PARSER = - new ConstructingObjectParser<>(SqlClearCursorAction.NAME, true, (objects, mode) -> new SqlClearCursorRequest( - mode, - (String) objects[0] - )); + new ConstructingObjectParser<>(SqlClearCursorAction.NAME, true, (objects, mode) -> new SqlClearCursorRequest( + mode, + (String) objects[0] + )); static { PARSER.declareString(constructorArg(), new ParseField("cursor")); @@ -96,13 +96,11 @@ public int hashCode() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - builder.field("cursor", cursor); - builder.endObject(); - return builder; + // This is needed just to test round-trip 
compatibility with proto.SqlClearCursorRequest + return new org.elasticsearch.xpack.sql.proto.SqlClearCursorRequest(mode(), cursor).toXContent(builder, params); } public static SqlClearCursorRequest fromXContent(XContentParser parser, Mode mode) { return PARSER.apply(parser, mode); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorResponse.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorResponse.java index b157d65dfff84..3bb3df9a47ffd 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorResponse.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorResponse.java @@ -6,13 +6,10 @@ package org.elasticsearch.xpack.sql.plugin; import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.StatusToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.rest.RestStatus; import java.io.IOException; @@ -20,20 +17,13 @@ import static org.elasticsearch.rest.RestStatus.NOT_FOUND; import static org.elasticsearch.rest.RestStatus.OK; +import static org.elasticsearch.xpack.sql.proto.SqlClearCursorResponse.SUCCEEDED; /** * Response to the request to clean all SQL resources associated with the cursor */ public class SqlClearCursorResponse extends ActionResponse implements StatusToXContentObject { - private static final ParseField SUCCEEDED = new ParseField("succeeded"); - public static final ObjectParser PARSER = - new ObjectParser<>(SqlClearCursorAction.NAME, true, SqlClearCursorResponse::new); - static { - PARSER.declareBoolean(SqlClearCursorResponse::setSucceeded, SUCCEEDED); - } - - private boolean succeeded; public SqlClearCursorResponse(boolean succeeded) { @@ -93,9 +83,4 @@ public int hashCode() { return Objects.hash(succeeded); } - public static SqlClearCursorResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); - } - - } diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryAction.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryAction.java index fd46799608c73..cbcf626adad55 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryAction.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryAction.java @@ -12,7 +12,6 @@ public class SqlQueryAction extends Action PARSER = objectParser(SqlQueryRequest::new); public static final ParseField CURSOR = new ParseField("cursor"); @@ -37,7 +38,7 @@ public class SqlQueryRequest extends AbstractSqlQueryRequest implements ToXConte static { PARSER.declareString(SqlQueryRequest::cursor, CURSOR); PARSER.declareObject(SqlQueryRequest::filter, - (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), FILTER); + (p, c) -> AbstractQueryBuilder.parseInnerQueryBuilder(p), FILTER); } private String cursor = ""; @@ -108,24 +109,15 @@ public String getDescription() { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.startObject(); - 
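
As the comment above says, the plugin request now renders itself by constructing the equivalent proto.* request and delegating, purely so the existing XContent round-trip tests stay meaningful. A sketch of such a round trip; it assumes the 6.x XContent helpers (Strings.toString and the createParser overload that takes a DeprecationHandler), and would sit inside a test method:

    import org.elasticsearch.common.Strings;
    import org.elasticsearch.common.xcontent.DeprecationHandler;
    import org.elasticsearch.common.xcontent.NamedXContentRegistry;
    import org.elasticsearch.common.xcontent.ToXContent;
    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.common.xcontent.XContentParser;
    import org.elasticsearch.common.xcontent.json.JsonXContent;
    import org.elasticsearch.xpack.sql.proto.Mode;

    SqlClearCursorRequest original = new SqlClearCursorRequest(Mode.PLAIN, "cursor#1");

    XContentBuilder builder = JsonXContent.contentBuilder().startObject();
    original.toXContent(builder, ToXContent.EMPTY_PARAMS);   // delegates to proto.SqlClearCursorRequest
    builder.endObject();
    String json = Strings.toString(builder);                 // {"cursor":"cursor#1"}

    try (XContentParser parser = JsonXContent.jsonXContent.createParser(
            NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
        SqlClearCursorRequest roundTripped = SqlClearCursorRequest.fromXContent(parser, Mode.PLAIN);
        assert roundTripped.equals(original);
    }
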
super.toXContent(builder, params); - if (cursor != null) { - builder.field("cursor", cursor); - } - builder.endObject(); - return builder; - } - - @Override - public boolean isFragment() { - return false; + // This is needed just to test round-trip compatibility with proto.SqlQueryRequest + return new org.elasticsearch.xpack.sql.proto.SqlQueryRequest(mode(), query(), params(), timeZone(), fetchSize(), + requestTimeout(), pageTimeout(), filter(), cursor()).toXContent(builder, params); } public static SqlQueryRequest fromXContent(XContentParser parser, Mode mode) { - SqlQueryRequest request = PARSER.apply(parser, null); + SqlQueryRequest request = PARSER.apply(parser, null); request.mode(mode); return request; } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryRequestBuilder.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryRequestBuilder.java index a08af6f8ce4b0..1eddd09d89d35 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryRequestBuilder.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryRequestBuilder.java @@ -9,25 +9,22 @@ import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.xpack.sql.plugin.AbstractSqlRequest.Mode; +import org.elasticsearch.xpack.sql.proto.Mode; +import org.elasticsearch.xpack.sql.proto.Protocol; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import java.util.Collections; import java.util.List; import java.util.TimeZone; -import static org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest.DEFAULT_FETCH_SIZE; -import static org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest.DEFAULT_PAGE_TIMEOUT; -import static org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest.DEFAULT_REQUEST_TIMEOUT; -import static org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest.DEFAULT_TIME_ZONE; - /** * The builder to build sql request */ public class SqlQueryRequestBuilder extends ActionRequestBuilder { public SqlQueryRequestBuilder(ElasticsearchClient client, SqlQueryAction action) { - this(client, action, "", Collections.emptyList(), null, DEFAULT_TIME_ZONE, DEFAULT_FETCH_SIZE, DEFAULT_REQUEST_TIMEOUT, - DEFAULT_PAGE_TIMEOUT, "", Mode.PLAIN); + this(client, action, "", Collections.emptyList(), null, Protocol.TIME_ZONE, Protocol.FETCH_SIZE, Protocol.REQUEST_TIMEOUT, + Protocol.PAGE_TIMEOUT, "", Mode.PLAIN); } public SqlQueryRequestBuilder(ElasticsearchClient client, SqlQueryAction action, String query, List params, diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryResponse.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryResponse.java index e0de05cd77438..118ba81f82df0 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryResponse.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlQueryResponse.java @@ -7,49 +7,28 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import 
org.elasticsearch.common.xcontent.ConstructingObjectParser; -import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.sql.proto.ColumnInfo; +import org.elasticsearch.xpack.sql.proto.Mode; import org.joda.time.ReadableDateTime; import java.io.IOException; +import java.sql.JDBCType; import java.util.ArrayList; import java.util.List; import java.util.Objects; import static java.util.Collections.unmodifiableList; -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; -import static org.elasticsearch.common.xcontent.XContentParserUtils.parseFieldsValue; /** * Response to perform an sql query */ public class SqlQueryResponse extends ActionResponse implements ToXContentObject { - @SuppressWarnings("unchecked") - public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("sql", true, - objects -> new SqlQueryResponse( - objects[0] == null ? "" : (String) objects[0], - (List) objects[1], - (List>) objects[2])); - - public static final ParseField CURSOR = new ParseField("cursor"); - public static final ParseField COLUMNS = new ParseField("columns"); - public static final ParseField ROWS = new ParseField("rows"); - - static { - PARSER.declareString(optionalConstructorArg(), CURSOR); - PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> ColumnInfo.fromXContent(p), COLUMNS); - PARSER.declareField(constructorArg(), (p, c) -> parseRows(p), ROWS, ValueType.OBJECT_ARRAY); - } - // TODO: Simplify cursor handling private String cursor; private List columns; @@ -109,7 +88,7 @@ public void readFrom(StreamInput in) throws IOException { int columnCount = in.readVInt(); List columns = new ArrayList<>(columnCount); for (int c = 0; c < columnCount; c++) { - columns.add(new ColumnInfo(in)); + columns.add(readColumnInfo(in)); } this.columns = unmodifiableList(columns); } else { @@ -139,7 +118,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(true); out.writeVInt(columns.size()); for (ColumnInfo column : columns) { - column.writeTo(out); + writeColumnInfo(out, column); } } out.writeVInt(rows.size()); @@ -155,7 +134,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - AbstractSqlRequest.Mode mode = AbstractSqlRequest.Mode.fromString(params.param("mode")); + Mode mode = Mode.fromString(params.param("mode")); builder.startObject(); { if (columns != null) { @@ -187,8 +166,8 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws /** * Serializes the provided value in SQL-compatible way based on the client mode */ - public static XContentBuilder value(XContentBuilder builder, AbstractSqlRequest.Mode mode, Object value) throws IOException { - if (mode == AbstractSqlRequest.Mode.JDBC && value instanceof ReadableDateTime) { + public static XContentBuilder value(XContentBuilder builder, Mode mode, Object value) throws IOException { + if (mode == Mode.JDBC && value instanceof ReadableDateTime) { // JDBC cannot parse dates in string format builder.value(((ReadableDateTime) value).getMillis()); } else { @@ -197,34 +176,33 @@ public static XContentBuilder value(XContentBuilder 
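
The value(...) helper in this hunk is where the client mode actually changes the payload: JDBC consumers cannot parse the default string rendering of dates, so they get epoch millis instead. The branch restated as a standalone sketch:

    import org.elasticsearch.common.xcontent.XContentBuilder;
    import org.elasticsearch.xpack.sql.proto.Mode;
    import org.joda.time.ReadableDateTime;

    import java.io.IOException;

    static XContentBuilder renderValue(XContentBuilder builder, Mode mode, Object value) throws IOException {
        if (mode == Mode.JDBC && value instanceof ReadableDateTime) {
            // JDBC cannot parse dates in string format, so send epoch millis instead
            return builder.value(((ReadableDateTime) value).getMillis());
        }
        return builder.value(value);
    }
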
builder, AbstractSqlRequest. return builder; } - public static SqlQueryResponse fromXContent(XContentParser parser) { - return PARSER.apply(parser, null); + public static ColumnInfo readColumnInfo(StreamInput in) throws IOException { + String table = in.readString(); + String name = in.readString(); + String esType = in.readString(); + JDBCType jdbcType; + int displaySize; + if (in.readBoolean()) { + jdbcType = JDBCType.valueOf(in.readVInt()); + displaySize = in.readVInt(); + } else { + jdbcType = null; + displaySize = 0; + } + return new ColumnInfo(table, name, esType, jdbcType, displaySize); } - public static List> parseRows(XContentParser parser) throws IOException { - List> list = new ArrayList<>(); - while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - if (parser.currentToken() == XContentParser.Token.START_ARRAY) { - list.add(parseRow(parser)); - } else { - throw new IllegalStateException("expected start array but got [" + parser.currentToken() + "]"); - } - } - return list; - } - - public static List parseRow(XContentParser parser) throws IOException { - List list = new ArrayList<>(); - while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - if (parser.currentToken().isValue()) { - list.add(parseFieldsValue(parser)); - } else if (parser.currentToken() == XContentParser.Token.VALUE_NULL) { - list.add(null); - } else { - throw new IllegalStateException("expected value but got [" + parser.currentToken() + "]"); - } + public static void writeColumnInfo(StreamOutput out, ColumnInfo columnInfo) throws IOException { + out.writeString(columnInfo.table()); + out.writeString(columnInfo.name()); + out.writeString(columnInfo.esType()); + if (columnInfo.jdbcType() != null) { + out.writeBoolean(true); + out.writeVInt(columnInfo.jdbcType().getVendorTypeNumber()); + out.writeVInt(columnInfo.displaySize()); + } else { + out.writeBoolean(false); } - return list; } @Override diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateRequest.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateRequest.java index 93e0630745100..103bfe5fddd69 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateRequest.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateRequest.java @@ -10,8 +10,11 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.sql.proto.Mode; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import java.io.IOException; import java.util.List; @@ -56,4 +59,14 @@ public static SqlTranslateRequest fromXContent(XContentParser parser, Mode mode) request.mode(mode); return request; } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + // This is needed just to test parsing of SqlTranslateRequest, so we can reuse SqlQuerySerialization + return new org.elasticsearch.xpack.sql.proto.SqlQueryRequest(mode(), query(), params(), timeZone(), fetchSize(), + requestTimeout(), pageTimeout(), filter(), null).toXContent(builder, params); + + } + + } diff --git 
a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateRequestBuilder.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateRequestBuilder.java index 11adc975014ca..d6d97c19297de 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateRequestBuilder.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateRequestBuilder.java @@ -9,27 +9,25 @@ import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.xpack.sql.proto.Mode; +import org.elasticsearch.xpack.sql.proto.Protocol; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import java.util.Collections; import java.util.List; import java.util.TimeZone; -import static org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest.DEFAULT_FETCH_SIZE; -import static org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest.DEFAULT_PAGE_TIMEOUT; -import static org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest.DEFAULT_REQUEST_TIMEOUT; -import static org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest.DEFAULT_TIME_ZONE; - /** * Builder for the request for the sql action for translating SQL queries into ES requests */ public class SqlTranslateRequestBuilder extends ActionRequestBuilder { public SqlTranslateRequestBuilder(ElasticsearchClient client, SqlTranslateAction action) { - this(client, action, AbstractSqlRequest.Mode.PLAIN, null, null, Collections.emptyList(), DEFAULT_TIME_ZONE, DEFAULT_FETCH_SIZE, - DEFAULT_REQUEST_TIMEOUT, DEFAULT_PAGE_TIMEOUT); + this(client, action, Mode.PLAIN, null, null, Collections.emptyList(), Protocol.TIME_ZONE, Protocol.FETCH_SIZE, + Protocol.REQUEST_TIMEOUT, Protocol.PAGE_TIMEOUT); } - public SqlTranslateRequestBuilder(ElasticsearchClient client, SqlTranslateAction action, AbstractSqlRequest.Mode mode, String query, + public SqlTranslateRequestBuilder(ElasticsearchClient client, SqlTranslateAction action, Mode mode, String query, QueryBuilder filter, List params, TimeZone timeZone, int fetchSize, TimeValue requestTimeout, TimeValue pageTimeout) { super(client, action, new SqlTranslateRequest(mode, query, params, filter, timeZone, fetchSize, requestTimeout, pageTimeout)); diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/AbstractSqlRequest.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/AbstractSqlRequest.java new file mode 100644 index 0000000000000..2001aecdac5d8 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/AbstractSqlRequest.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.proto; + +import org.elasticsearch.common.xcontent.ToXContentFragment; + +import java.util.Objects; + +/** + * Base request for all SQL-related requests for JDBC/CLI client + *

+ * Contains information about the client mode that can be used to generate different responses based on the caller type. + */ +public abstract class AbstractSqlRequest implements ToXContentFragment { + + private final Mode mode; + + protected AbstractSqlRequest(Mode mode) { + this.mode = mode; + } + + public Mode mode() { + return mode; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + AbstractSqlRequest that = (AbstractSqlRequest) o; + return mode == that.mode; + } + + @Override + public int hashCode() { + return Objects.hash(mode); + } + +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/ColumnInfo.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/ColumnInfo.java similarity index 70% rename from x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/ColumnInfo.java rename to x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/ColumnInfo.java index 5c12c776dd198..ad2f687ae0bef 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/ColumnInfo.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/ColumnInfo.java @@ -3,14 +3,11 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. */ -package org.elasticsearch.xpack.sql.plugin; +package org.elasticsearch.xpack.sql.proto; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -26,16 +23,16 @@ /** * Information about a column returned with first query response */ -public final class ColumnInfo implements Writeable, ToXContentObject { +public class ColumnInfo implements ToXContentObject { private static final ConstructingObjectParser PARSER = - new ConstructingObjectParser<>("column_info", true, objects -> - new ColumnInfo( - objects[0] == null ? "" : (String) objects[0], - (String) objects[1], - (String) objects[2], - objects[3] == null ? null : JDBCType.valueOf((int) objects[3]), - objects[4] == null ? 0 : (int) objects[4])); + new ConstructingObjectParser<>("column_info", true, objects -> + new ColumnInfo( + objects[0] == null ? "" : (String) objects[0], + (String) objects[1], + (String) objects[2], + objects[3] == null ? null : JDBCType.valueOf((int) objects[3]), + objects[4] == null ? 
0 : (int) objects[4])); private static final ParseField TABLE = new ParseField("table"); private static final ParseField NAME = new ParseField("name"); @@ -74,33 +71,6 @@ public ColumnInfo(String table, String name, String esType) { this.displaySize = 0; } - ColumnInfo(StreamInput in) throws IOException { - table = in.readString(); - name = in.readString(); - esType = in.readString(); - if (in.readBoolean()) { - jdbcType = JDBCType.valueOf(in.readVInt()); - displaySize = in.readVInt(); - } else { - jdbcType = null; - displaySize = 0; - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeString(table); - out.writeString(name); - out.writeString(esType); - if (jdbcType != null) { - out.writeBoolean(true); - out.writeVInt(jdbcType.getVendorTypeNumber()); - out.writeVInt(displaySize); - } else { - out.writeBoolean(false); - } - } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -162,10 +132,10 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; ColumnInfo that = (ColumnInfo) o; return displaySize == that.displaySize && - Objects.equals(table, that.table) && - Objects.equals(name, that.name) && - Objects.equals(esType, that.esType) && - jdbcType == that.jdbcType; + Objects.equals(table, that.table) && + Objects.equals(name, that.name) && + Objects.equals(esType, that.esType) && + jdbcType == that.jdbcType; } @Override diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/MainResponse.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/MainResponse.java new file mode 100644 index 0000000000000..73b6cbc529ec6 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/MainResponse.java @@ -0,0 +1,107 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.sql.proto; + +import org.elasticsearch.Build; +import org.elasticsearch.Version; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.util.Objects; + +/** + * Main (/) response for JDBC/CLI client + */ +public class MainResponse { + private String nodeName; + // TODO: Add parser for Version + private Version version; + private String clusterName; + private String clusterUuid; + // TODO: Add parser for Build + private Build build; + + private MainResponse() { + } + + public MainResponse(String nodeName, Version version, String clusterName, String clusterUuid, Build build) { + this.nodeName = nodeName; + this.version = version; + this.clusterName = clusterName; + this.clusterUuid = clusterUuid; + this.build = build; + } + + public String getNodeName() { + return nodeName; + } + + public Version getVersion() { + return version; + } + + public String getClusterName() { + return clusterName; + } + + public String getClusterUuid() { + return clusterUuid; + } + + public Build getBuild() { + return build; + } + + private static final ObjectParser PARSER = new ObjectParser<>(MainResponse.class.getName(), true, + MainResponse::new); + + static { + PARSER.declareString((response, value) -> response.nodeName = value, new ParseField("name")); + PARSER.declareString((response, value) -> response.clusterName = value, new ParseField("cluster_name")); + PARSER.declareString((response, value) -> response.clusterUuid = value, new ParseField("cluster_uuid")); + PARSER.declareString((response, value) -> { + }, new ParseField("tagline")); + PARSER.declareObject((response, value) -> { + final String buildFlavor = (String) value.get("build_flavor"); + final String buildType = (String) value.get("build_type"); + response.build = + new Build( + buildFlavor == null ? Build.Flavor.UNKNOWN : Build.Flavor.fromDisplayName(buildFlavor), + buildType == null ? Build.Type.UNKNOWN : Build.Type.fromDisplayName(buildType), + (String) value.get("build_hash"), + (String) value.get("build_date"), + (boolean) value.get("build_snapshot")); + response.version = Version.fromString((String) value.get("number")); + }, (parser, context) -> parser.map(), new ParseField("version")); + } + + public static MainResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + MainResponse other = (MainResponse) o; + return Objects.equals(nodeName, other.nodeName) && + Objects.equals(version, other.version) && + Objects.equals(clusterUuid, other.clusterUuid) && + Objects.equals(build, other.build) && + Objects.equals(clusterName, other.clusterName); + } + + @Override + public int hashCode() { + return Objects.hash(nodeName, version, clusterUuid, build, clusterName); + } +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Mode.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Mode.java new file mode 100644 index 0000000000000..02f175ca80d79 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Mode.java @@ -0,0 +1,30 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
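
The new MainResponse parses the root (/) endpoint response with a lenient ObjectParser, pulling the version and build out of the nested "version" object by hand. A parsing sketch against a hand-written payload (field values are illustrative; same 6.x createParser assumption as earlier):

    import org.elasticsearch.common.xcontent.DeprecationHandler;
    import org.elasticsearch.common.xcontent.NamedXContentRegistry;
    import org.elasticsearch.common.xcontent.XContentParser;
    import org.elasticsearch.common.xcontent.json.JsonXContent;
    import org.elasticsearch.xpack.sql.proto.MainResponse;

    String json = "{"
            + "\"name\": \"node-0\","
            + "\"cluster_name\": \"test-cluster\","
            + "\"cluster_uuid\": \"uuid-1\","
            + "\"version\": {"
            + "  \"number\": \"6.3.0\","
            + "  \"build_flavor\": \"default\","
            + "  \"build_type\": \"tar\","
            + "  \"build_hash\": \"abc123\","
            + "  \"build_date\": \"2018-05-15T00:00:00Z\","
            + "  \"build_snapshot\": false"
            + "},"
            + "\"tagline\": \"You Know, for Search\""
            + "}";

    try (XContentParser parser = JsonXContent.jsonXContent.createParser(
            NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
        MainResponse info = MainResponse.fromXContent(parser);
        assert "node-0".equals(info.getNodeName());
        assert "6.3.0".equals(info.getVersion().toString());
    }

Note that the "version" object is consumed as a plain map (parser.map()) and the build_snapshot flag is required, since it is cast to a primitive boolean.
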
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.proto; + +import java.util.Locale; + +/** + * SQL protocol mode + */ +public enum Mode { + PLAIN, + JDBC; + + public static Mode fromString(String mode) { + if (mode == null) { + return PLAIN; + } + return Mode.valueOf(mode.toUpperCase(Locale.ROOT)); + } + + + @Override + public String toString() { + return this.name().toLowerCase(Locale.ROOT); + } +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Protocol.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Protocol.java new file mode 100644 index 0000000000000..a61978828c80c --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Protocol.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.proto; + +import org.elasticsearch.common.unit.TimeValue; + +import java.util.TimeZone; + +/** + * Sql protocol defaults and end-points shared between JDBC and REST protocol implementations + */ +public final class Protocol { + public static final TimeZone TIME_ZONE = TimeZone.getTimeZone("UTC"); + + /** + * Global choice for the default fetch size. + */ + public static final int FETCH_SIZE = 1000; + public static final TimeValue REQUEST_TIMEOUT = TimeValue.timeValueSeconds(90); + public static final TimeValue PAGE_TIMEOUT = TimeValue.timeValueSeconds(45); + + /** + * SQL-related endpoints + */ + public static final String CLEAR_CURSOR_REST_ENDPOINT = "/_xpack/sql/close"; + public static final String SQL_QUERY_REST_ENDPOINT = "/_xpack/sql"; +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlClearCursorRequest.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlClearCursorRequest.java new file mode 100644 index 0000000000000..310dde4430210 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlClearCursorRequest.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
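
Protocol centralizes the defaults that previously lived as DEFAULT_* constants on the transport-level AbstractSqlQueryRequest, so the JDBC/CLI side can reference them without depending on the plugin classes. A first-page query built entirely from those shared defaults might look like:

    import org.elasticsearch.xpack.sql.proto.Mode;
    import org.elasticsearch.xpack.sql.proto.Protocol;
    import org.elasticsearch.xpack.sql.proto.SqlQueryRequest;

    import java.util.Collections;

    // No params, no filter, UTC time zone, 1000-row pages, 90s request / 45s page timeouts.
    SqlQueryRequest request = new SqlQueryRequest(Mode.PLAIN, "SELECT 1", Collections.emptyList(),
            null, Protocol.TIME_ZONE, Protocol.FETCH_SIZE, Protocol.REQUEST_TIMEOUT, Protocol.PAGE_TIMEOUT);
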
+ */ +package org.elasticsearch.xpack.sql.proto; + +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Objects; + +/** + * Request to clean all SQL resources associated with the cursor for JDBC/CLI client + */ +public class SqlClearCursorRequest extends AbstractSqlRequest { + + private final String cursor; + + public SqlClearCursorRequest(Mode mode, String cursor) { + super(mode); + this.cursor = cursor; + } + + public String getCursor() { + return cursor; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + SqlClearCursorRequest that = (SqlClearCursorRequest) o; + return Objects.equals(cursor, that.cursor); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), cursor); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("cursor", cursor); + return builder; + } +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlClearCursorResponse.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlClearCursorResponse.java new file mode 100644 index 0000000000000..b56a8335d20d5 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlClearCursorResponse.java @@ -0,0 +1,61 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.sql.proto; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * Response to the request to clean all SQL resources associated with the cursor for JDBC/CLI client + */ +public class SqlClearCursorResponse { + + public static final ParseField SUCCEEDED = new ParseField("succeeded"); + public static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>(SqlClearCursorResponse.class.getName(), true, + objects -> new SqlClearCursorResponse(objects[0] == null ? false : (boolean) objects[0])); + + static { + PARSER.declareBoolean(optionalConstructorArg(), SUCCEEDED); + } + + + private final boolean succeeded; + + public SqlClearCursorResponse(boolean succeeded) { + this.succeeded = succeeded; + } + + /** + * @return Whether the attempt to clear a cursor was successful. 
+ */ + public boolean isSucceeded() { + return succeeded; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SqlClearCursorResponse response = (SqlClearCursorResponse) o; + return succeeded == response.succeeded; + } + + @Override + public int hashCode() { + return Objects.hash(succeeded); + } + + public static SqlClearCursorResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlQueryRequest.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlQueryRequest.java new file mode 100644 index 0000000000000..00a1696a05f60 --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlQueryRequest.java @@ -0,0 +1,172 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.sql.proto; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.TimeZone; + +/** + * Sql query request for JDBC/CLI client + */ +public class SqlQueryRequest extends AbstractSqlRequest { + @Nullable + private final String cursor; + private final String query; + private final TimeZone timeZone; + private final int fetchSize; + private final TimeValue requestTimeout; + private final TimeValue pageTimeout; + @Nullable + private final ToXContent filter; + private final List params; + + + public SqlQueryRequest(Mode mode, String query, List params, TimeZone timeZone, + int fetchSize, TimeValue requestTimeout, TimeValue pageTimeout, ToXContent filter, String cursor) { + super(mode); + this.query = query; + this.params = params; + this.timeZone = timeZone; + this.fetchSize = fetchSize; + this.requestTimeout = requestTimeout; + this.pageTimeout = pageTimeout; + this.filter = filter; + this.cursor = cursor; + } + + public SqlQueryRequest(Mode mode, String query, List params, ToXContent filter, TimeZone timeZone, + int fetchSize, TimeValue requestTimeout, TimeValue pageTimeout) { + this(mode, query, params, timeZone, fetchSize, requestTimeout, pageTimeout, filter, null); + } + + public SqlQueryRequest(Mode mode, String cursor, TimeValue requestTimeout, TimeValue pageTimeout) { + this(mode, "", Collections.emptyList(), Protocol.TIME_ZONE, Protocol.FETCH_SIZE, requestTimeout, pageTimeout, null, cursor); + } + + + /** + * The key that must be sent back to SQL to access the next page of + * results. + */ + public String cursor() { + return cursor; + } + + /** + * Text of SQL query + */ + public String query() { + return query; + } + + /** + * An optional list of parameters if the SQL query is parametrized + */ + public List params() { + return params; + } + + /** + * The client's time zone + */ + public TimeZone timeZone() { + return timeZone; + } + + + /** + * Hint about how many results to fetch at once. 
+ */ + public int fetchSize() { + return fetchSize; + } + + /** + * The timeout specified on the search request + */ + public TimeValue requestTimeout() { + return requestTimeout; + } + + /** + * The scroll timeout + */ + public TimeValue pageTimeout() { + return pageTimeout; + } + + /** + * An optional Query DSL defined query that can added as a filter on the top of the SQL query + */ + public ToXContent filter() { + return filter; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + SqlQueryRequest that = (SqlQueryRequest) o; + return fetchSize == that.fetchSize && + Objects.equals(query, that.query) && + Objects.equals(params, that.params) && + Objects.equals(timeZone, that.timeZone) && + Objects.equals(requestTimeout, that.requestTimeout) && + Objects.equals(pageTimeout, that.pageTimeout) && + Objects.equals(filter, that.filter) && + Objects.equals(cursor, that.cursor); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), query, timeZone, fetchSize, requestTimeout, pageTimeout, filter, cursor); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (query != null) { + builder.field("query", query); + } + if (this.params.isEmpty() == false) { + builder.startArray("params"); + for (SqlTypedParamValue val : this.params) { + val.toXContent(builder, params); + } + builder.endArray(); + } + if (timeZone != null) { + builder.field("time_zone", timeZone.getID()); + } + if (fetchSize != Protocol.FETCH_SIZE) { + builder.field("fetch_size", fetchSize); + } + if (requestTimeout != Protocol.REQUEST_TIMEOUT) { + builder.field("request_timeout", requestTimeout.getStringRep()); + } + if (pageTimeout != Protocol.PAGE_TIMEOUT) { + builder.field("page_timeout", pageTimeout.getStringRep()); + } + if (filter != null) { + builder.field("filter"); + filter.toXContent(builder, params); + } + if (cursor != null) { + builder.field("cursor", cursor); + } + return builder; + } + +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlQueryResponse.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlQueryResponse.java new file mode 100644 index 0000000000000..8937261237c7f --- /dev/null +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlQueryResponse.java @@ -0,0 +1,122 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
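
One detail of the proto SqlQueryRequest.toXContent above: paging settings are only rendered when they differ from the defaults, and for the two TimeValue fields that check is reference inequality (!=) against the shared Protocol instances, so a field is only suppressed when the caller passed the default object through unchanged. The pattern in isolation:

    // Render only non-default paging settings.
    if (fetchSize != Protocol.FETCH_SIZE) {              // int: plain value comparison
        builder.field("fetch_size", fetchSize);
    }
    if (requestTimeout != Protocol.REQUEST_TIMEOUT) {    // TimeValue: identity, not value, comparison
        builder.field("request_timeout", requestTimeout.getStringRep());
    }
    if (pageTimeout != Protocol.PAGE_TIMEOUT) {
        builder.field("page_timeout", pageTimeout.getStringRep());
    }
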
+ */ +package org.elasticsearch.xpack.sql.proto; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; +import static org.elasticsearch.common.xcontent.XContentParserUtils.parseFieldsValue; + +/** + * Response to perform an sql query for JDBC/CLI client + */ +public class SqlQueryResponse { + + @SuppressWarnings("unchecked") + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("sql", true, + objects -> new SqlQueryResponse( + objects[0] == null ? "" : (String) objects[0], + (List) objects[1], + (List>) objects[2])); + + public static final ParseField CURSOR = new ParseField("cursor"); + public static final ParseField COLUMNS = new ParseField("columns"); + public static final ParseField ROWS = new ParseField("rows"); + + static { + PARSER.declareString(optionalConstructorArg(), CURSOR); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> ColumnInfo.fromXContent(p), COLUMNS); + PARSER.declareField(constructorArg(), (p, c) -> parseRows(p), ROWS, ValueType.OBJECT_ARRAY); + } + + // TODO: Simplify cursor handling + private final String cursor; + private final List columns; + // TODO investigate reusing Page here - it probably is much more efficient + private final List> rows; + + public SqlQueryResponse(String cursor, @Nullable List columns, List> rows) { + this.cursor = cursor; + this.columns = columns; + this.rows = rows; + } + + /** + * The key that must be sent back to SQL to access the next page of + * results. If equal to "" then there is no next page. 
+ */ + public String cursor() { + return cursor; + } + + public long size() { + return rows.size(); + } + + public List columns() { + return columns; + } + + public List> rows() { + return rows; + } + + public static SqlQueryResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + + public static List> parseRows(XContentParser parser) throws IOException { + List> list = new ArrayList<>(); + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + if (parser.currentToken() == XContentParser.Token.START_ARRAY) { + list.add(parseRow(parser)); + } else { + throw new IllegalStateException("expected start array but got [" + parser.currentToken() + "]"); + } + } + return list; + } + + public static List parseRow(XContentParser parser) throws IOException { + List list = new ArrayList<>(); + while (parser.nextToken() != XContentParser.Token.END_ARRAY) { + if (parser.currentToken().isValue()) { + list.add(parseFieldsValue(parser)); + } else if (parser.currentToken() == XContentParser.Token.VALUE_NULL) { + list.add(null); + } else { + throw new IllegalStateException("expected value but got [" + parser.currentToken() + "]"); + } + } + return list; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SqlQueryResponse that = (SqlQueryResponse) o; + return Objects.equals(cursor, that.cursor) && + Objects.equals(columns, that.columns) && + Objects.equals(rows, that.rows); + } + + @Override + public int hashCode() { + return Objects.hash(cursor, columns, rows); + } + +} diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTypedParamValue.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlTypedParamValue.java similarity index 76% rename from x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTypedParamValue.java rename to x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlTypedParamValue.java index ffde82fab3491..a85b66b80a34d 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlTypedParamValue.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/SqlTypedParamValue.java @@ -3,12 +3,9 @@ * or more contributor license agreements. Licensed under the Elastic License; * you may not use this file except in compliance with the Elastic License. 
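
The proto SqlQueryResponse rebuilds rows by walking raw parser tokens (parseRows/parseRow) rather than declaring a typed field, which is what lets null cells survive. Feeding it a small payload end to end, under the same XContent helper assumptions as before:

    import org.elasticsearch.common.xcontent.DeprecationHandler;
    import org.elasticsearch.common.xcontent.NamedXContentRegistry;
    import org.elasticsearch.common.xcontent.XContentParser;
    import org.elasticsearch.common.xcontent.json.JsonXContent;
    import org.elasticsearch.xpack.sql.proto.SqlQueryResponse;

    String json = "{"
            + "\"columns\": [ {\"name\": \"year\", \"type\": \"long\"} ],"
            + "\"rows\": [ [2016], [2017], [null] ],"
            + "\"cursor\": \"c2Nyb2xs\""
            + "}";

    try (XContentParser parser = JsonXContent.jsonXContent.createParser(
            NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
        SqlQueryResponse response = SqlQueryResponse.fromXContent(parser);
        assert response.size() == 3;
        assert response.rows().get(2).get(0) == null;   // VALUE_NULL is kept as a null cell
    }
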
*/ -package org.elasticsearch.xpack.sql.plugin; +package org.elasticsearch.xpack.sql.proto; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; @@ -25,12 +22,12 @@ /** * Represent a strongly typed parameter value */ -public class SqlTypedParamValue implements ToXContentObject, Writeable { +public class SqlTypedParamValue implements ToXContentObject { private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("params", true, objects -> new SqlTypedParamValue( - objects[0], - DataType.fromEsType((String) objects[1]))); + DataType.fromEsType((String) objects[1]), objects[0] + )); private static final ParseField VALUE = new ParseField("value"); private static final ParseField TYPE = new ParseField("type"); @@ -43,7 +40,7 @@ public class SqlTypedParamValue implements ToXContentObject, Writeable { public final Object value; public final DataType dataType; - public SqlTypedParamValue(Object value, DataType dataType) { + public SqlTypedParamValue(DataType dataType, Object value) { this.value = value; this.dataType = dataType; } @@ -61,17 +58,6 @@ public static SqlTypedParamValue fromXContent(XContentParser parser) { return PARSER.apply(parser, null); } - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeEnum(dataType); - out.writeGenericValue(value); - } - - public SqlTypedParamValue(StreamInput in) throws IOException { - dataType = in.readEnum(DataType.class); - value = in.readGenericValue(); - } - @Override public boolean equals(Object o) { if (this == o) { @@ -94,4 +80,4 @@ public int hashCode() { public String toString() { return String.valueOf(value) + "[" + dataType + "]"; } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorRequestTests.java b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorRequestTests.java index 83546924a38f8..e479ae8b4f1ea 100644 --- a/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorRequestTests.java +++ b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorRequestTests.java @@ -8,17 +8,18 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractSerializingTestCase; +import org.elasticsearch.xpack.sql.proto.Mode; import org.junit.Before; import java.io.IOException; import java.util.function.Consumer; public class SqlClearCursorRequestTests extends AbstractSerializingTestCase { - public AbstractSqlRequest.Mode testMode; + public Mode testMode; @Before public void setup() { - testMode = randomFrom(AbstractSqlRequest.Mode.values()); + testMode = randomFrom(Mode.values()); } @Override @@ -40,7 +41,7 @@ protected SqlClearCursorRequest doParseInstance(XContentParser parser) { protected SqlClearCursorRequest mutateInstance(SqlClearCursorRequest instance) throws IOException { @SuppressWarnings("unchecked") Consumer mutator = randomFrom( - request -> request.mode(randomValueOtherThan(request.mode(), () -> randomFrom(AbstractSqlRequest.Mode.values()))), + request -> 
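
Note the flipped constructor order that comes with SqlTypedParamValue's move to the proto package: the data type now comes first, then the value, which is what the test changes below are adjusting to. For example:

    import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue;
    import org.elasticsearch.xpack.sql.type.DataType;

    SqlTypedParamValue longParam = new SqlTypedParamValue(DataType.LONG, 10L);   // was (10L, DataType.LONG)
    SqlTypedParamValue nullParam = new SqlTypedParamValue(DataType.NULL, null);
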
request.mode(randomValueOtherThan(request.mode(), () -> randomFrom(Mode.values()))), request -> request.setCursor(randomValueOtherThan(request.getCursor(), SqlQueryResponseTests::randomStringCursor)) ); SqlClearCursorRequest newRequest = new SqlClearCursorRequest(instance.mode(), instance.getCursor()); diff --git a/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorResponseTests.java b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorResponseTests.java index 0ef2875d8e7dd..94964428bb4f3 100644 --- a/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorResponseTests.java +++ b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlClearCursorResponseTests.java @@ -27,6 +27,8 @@ protected SqlClearCursorResponse mutateInstance(SqlClearCursorResponse instance) @Override protected SqlClearCursorResponse doParseInstance(XContentParser parser) { - return SqlClearCursorResponse.fromXContent(parser); + org.elasticsearch.xpack.sql.proto.SqlClearCursorResponse response = + org.elasticsearch.xpack.sql.proto.SqlClearCursorResponse.fromXContent(parser); + return new SqlClearCursorResponse(response.isSucceeded()); } } diff --git a/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlQueryRequestTests.java b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlQueryRequestTests.java index 5fbe4e42d48f2..0e4a183ab1626 100644 --- a/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlQueryRequestTests.java +++ b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlQueryRequestTests.java @@ -14,6 +14,8 @@ import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.proto.Mode; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import org.elasticsearch.xpack.sql.type.DataType; import org.junit.Before; @@ -28,11 +30,11 @@ public class SqlQueryRequestTests extends AbstractSerializingTestCase { - public AbstractSqlRequest.Mode testMode; + public Mode testMode; @Before public void setup() { - testMode = randomFrom(AbstractSqlRequest.Mode.values()); + testMode = randomFrom(Mode.values()); } @Override @@ -63,11 +65,11 @@ public List randomParameters() { List arr = new ArrayList<>(len); for (int i = 0; i < len; i++) { @SuppressWarnings("unchecked") Supplier supplier = randomFrom( - () -> new SqlTypedParamValue(randomBoolean(), DataType.BOOLEAN), - () -> new SqlTypedParamValue(randomLong(), DataType.LONG), - () -> new SqlTypedParamValue(randomDouble(), DataType.DOUBLE), - () -> new SqlTypedParamValue(null, DataType.NULL), - () -> new SqlTypedParamValue(randomAlphaOfLength(10), DataType.KEYWORD) + () -> new SqlTypedParamValue(DataType.BOOLEAN, randomBoolean()), + () -> new SqlTypedParamValue(DataType.LONG, randomLong()), + () -> new SqlTypedParamValue(DataType.DOUBLE, randomDouble()), + () -> new SqlTypedParamValue(DataType.NULL, null), + () -> new SqlTypedParamValue(DataType.KEYWORD, randomAlphaOfLength(10)) ); arr.add(supplier.get()); } @@ -93,7 +95,7 @@ protected SqlQueryRequest doParseInstance(XContentParser parser) { protected SqlQueryRequest mutateInstance(SqlQueryRequest instance) { @SuppressWarnings("unchecked") Consumer mutator = randomFrom( - request -> request.mode(randomValueOtherThan(request.mode(), () -> 
randomFrom(AbstractSqlRequest.Mode.values()))), + request -> request.mode(randomValueOtherThan(request.mode(), () -> randomFrom(Mode.values()))), request -> request.query(randomValueOtherThan(request.query(), () -> randomAlphaOfLength(5))), request -> request.params(randomValueOtherThan(request.params(), this::randomParameters)), request -> request.timeZone(randomValueOtherThan(request.timeZone(), ESTestCase::randomTimeZone)), diff --git a/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlQueryResponseTests.java b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlQueryResponseTests.java index 42c08bb09142f..bc5e5ae2a0180 100644 --- a/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlQueryResponseTests.java +++ b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlQueryResponseTests.java @@ -13,6 +13,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.AbstractStreamableXContentTestCase; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.proto.ColumnInfo; import java.io.IOException; import java.sql.JDBCType; @@ -114,6 +115,8 @@ public void testToXContent() throws IOException { @Override protected SqlQueryResponse doParseInstance(XContentParser parser) { - return SqlQueryResponse.fromXContent(parser); + org.elasticsearch.xpack.sql.proto.SqlQueryResponse response = + org.elasticsearch.xpack.sql.proto.SqlQueryResponse.fromXContent(parser); + return new SqlQueryResponse(response.cursor(), response.columns(), response.rows()); } } diff --git a/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateRequestTests.java b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateRequestTests.java index 21b002293768f..2eb3d71bbf410 100644 --- a/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateRequestTests.java +++ b/x-pack/plugin/sql/sql-proto/src/test/java/org/elasticsearch/xpack/sql/plugin/SqlTranslateRequestTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.proto.Mode; import org.junit.Before; import java.io.IOException; @@ -25,11 +26,11 @@ public class SqlTranslateRequestTests extends AbstractSerializingTestCase { - public AbstractSqlRequest.Mode testMode; + public Mode testMode; @Before public void setup() { - testMode = randomFrom(AbstractSqlRequest.Mode.values()); + testMode = randomFrom(Mode.values()); } @Override @@ -71,7 +72,7 @@ protected SqlTranslateRequest mutateInstance(SqlTranslateRequest instance) throw request -> request.query(randomValueOtherThan(request.query(), () -> randomAlphaOfLength(5))), request -> request.timeZone(randomValueOtherThan(request.timeZone(), ESTestCase::randomTimeZone)), request -> request.fetchSize(randomValueOtherThan(request.fetchSize(), () -> between(1, Integer.MAX_VALUE))), - request -> request.requestTimeout(randomValueOtherThan(request.requestTimeout(), () -> randomTV())), + request -> request.requestTimeout(randomValueOtherThan(request.requestTimeout(), this::randomTV)), request -> request.filter(randomValueOtherThan(request.filter(), () -> request.filter() == null ? 
randomFilter(random()) : randomFilterOrNull(random()))) ); diff --git a/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java index bf7c245b24cbe..8f77d5397e948 100644 --- a/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java +++ b/x-pack/plugin/sql/sql-shared-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java @@ -5,7 +5,6 @@ */ package org.elasticsearch.xpack.sql.client; -import org.elasticsearch.action.main.MainResponse; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -22,13 +21,14 @@ import org.elasticsearch.xpack.sql.client.shared.ConnectionConfiguration; import org.elasticsearch.xpack.sql.client.shared.JreHttpUrlConnection; import org.elasticsearch.xpack.sql.client.shared.JreHttpUrlConnection.ResponseOrException; -import org.elasticsearch.xpack.sql.plugin.AbstractSqlRequest; -import org.elasticsearch.xpack.sql.plugin.SqlClearCursorAction; -import org.elasticsearch.xpack.sql.plugin.SqlClearCursorRequest; -import org.elasticsearch.xpack.sql.plugin.SqlClearCursorResponse; -import org.elasticsearch.xpack.sql.plugin.SqlQueryAction; -import org.elasticsearch.xpack.sql.plugin.SqlQueryRequest; -import org.elasticsearch.xpack.sql.plugin.SqlQueryResponse; +import org.elasticsearch.xpack.sql.proto.AbstractSqlRequest; +import org.elasticsearch.xpack.sql.proto.MainResponse; +import org.elasticsearch.xpack.sql.proto.Mode; +import org.elasticsearch.xpack.sql.proto.Protocol; +import org.elasticsearch.xpack.sql.proto.SqlClearCursorRequest; +import org.elasticsearch.xpack.sql.proto.SqlClearCursorResponse; +import org.elasticsearch.xpack.sql.proto.SqlQueryRequest; +import org.elasticsearch.xpack.sql.proto.SqlQueryResponse; import java.io.IOException; import java.io.InputStream; @@ -50,7 +50,7 @@ public class HttpClient { private final ConnectionConfiguration cfg; - public HttpClient(ConnectionConfiguration cfg) throws SQLException { + public HttpClient(ConnectionConfiguration cfg) { this.cfg = cfg; } @@ -66,26 +66,25 @@ public MainResponse serverInfo() throws SQLException { public SqlQueryResponse queryInit(String query, int fetchSize) throws SQLException { // TODO allow customizing the time zone - this is what session set/reset/get should be about - SqlQueryRequest sqlRequest = new SqlQueryRequest(AbstractSqlRequest.Mode.PLAIN, query, Collections.emptyList(), null, + SqlQueryRequest sqlRequest = new SqlQueryRequest(Mode.PLAIN, query, Collections.emptyList(), null, TimeZone.getTimeZone("UTC"), fetchSize, TimeValue.timeValueMillis(cfg.queryTimeout()), - TimeValue.timeValueMillis(cfg.pageTimeout()), "" - ); + TimeValue.timeValueMillis(cfg.pageTimeout())); return query(sqlRequest); } public SqlQueryResponse query(SqlQueryRequest sqlRequest) throws SQLException { - return post(SqlQueryAction.REST_ENDPOINT, sqlRequest, SqlQueryResponse::fromXContent); + return post(Protocol.SQL_QUERY_REST_ENDPOINT, sqlRequest, SqlQueryResponse::fromXContent); } public SqlQueryResponse nextPage(String cursor) throws SQLException { - SqlQueryRequest sqlRequest = new SqlQueryRequest(); - sqlRequest.cursor(cursor); - return post(SqlQueryAction.REST_ENDPOINT, sqlRequest, SqlQueryResponse::fromXContent); + SqlQueryRequest sqlRequest = new SqlQueryRequest(Mode.PLAIN, cursor, 
TimeValue.timeValueMillis(cfg.queryTimeout()), + TimeValue.timeValueMillis(cfg.pageTimeout())); + return post(Protocol.SQL_QUERY_REST_ENDPOINT, sqlRequest, SqlQueryResponse::fromXContent); } public boolean queryClose(String cursor) throws SQLException { - SqlClearCursorResponse response = post(SqlClearCursorAction.REST_ENDPOINT, - new SqlClearCursorRequest(AbstractSqlRequest.Mode.PLAIN, cursor), + SqlClearCursorResponse response = post(Protocol.CLEAR_CURSOR_REST_ENDPOINT, + new SqlClearCursorRequest(Mode.PLAIN, cursor), SqlClearCursorResponse::fromXContent); return response.isSucceeded(); } @@ -167,4 +166,4 @@ private Response fromXContent(XContentType xContentType, BytesReferen throw new ClientException("Cannot parse response", ex); } } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/PlanExecutor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/PlanExecutor.java index 8c58769b75962..23f1a6049dc2c 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/PlanExecutor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/PlanExecutor.java @@ -17,7 +17,7 @@ import org.elasticsearch.xpack.sql.plan.physical.EsQueryExec; import org.elasticsearch.xpack.sql.planner.Planner; import org.elasticsearch.xpack.sql.planner.PlanningException; -import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import org.elasticsearch.xpack.sql.session.Configuration; import org.elasticsearch.xpack.sql.session.Cursor; import org.elasticsearch.xpack.sql.session.RowSet; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/AstBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/AstBuilder.java index de28f33187260..48aa2cf1fa79b 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/AstBuilder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/AstBuilder.java @@ -8,7 +8,7 @@ import org.antlr.v4.runtime.Token; import org.elasticsearch.xpack.sql.parser.SqlBaseParser.SingleStatementContext; import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import java.util.Map; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java index bf432a7236357..7ce65aa4cfec1 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java @@ -33,7 +33,7 @@ import org.elasticsearch.xpack.sql.plan.logical.command.sys.SysTableTypes; import org.elasticsearch.xpack.sql.plan.logical.command.sys.SysTables; import org.elasticsearch.xpack.sql.plan.logical.command.sys.SysTypes; -import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.util.StringUtils; @@ -190,4 +190,4 @@ public SysTypes visitSysTypes(SysTypesContext ctx) { public Object visitSysTableTypes(SysTableTypesContext ctx) { return new SysTableTypes(source(ctx)); } -} \ No newline at end of file +} diff --git 
a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java index b14611f9f599f..a6185def278a1 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/ExpressionBuilder.java @@ -76,7 +76,7 @@ import org.elasticsearch.xpack.sql.parser.SqlBaseParser.StringLiteralContext; import org.elasticsearch.xpack.sql.parser.SqlBaseParser.StringQueryContext; import org.elasticsearch.xpack.sql.parser.SqlBaseParser.SubqueryExpressionContext; -import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import org.elasticsearch.xpack.sql.tree.Location; import org.elasticsearch.xpack.sql.type.DataType; import org.elasticsearch.xpack.sql.type.DataTypes; @@ -516,4 +516,4 @@ private SqlTypedParamValue param(TerminalNode node) { return params.get(token); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/LogicalPlanBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/LogicalPlanBuilder.java index f41fce1602783..3435994a0fc42 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/LogicalPlanBuilder.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/LogicalPlanBuilder.java @@ -41,7 +41,7 @@ import org.elasticsearch.xpack.sql.plan.logical.SubQueryAlias; import org.elasticsearch.xpack.sql.plan.logical.UnresolvedRelation; import org.elasticsearch.xpack.sql.plan.logical.With; -import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import org.elasticsearch.xpack.sql.session.EmptyExecutable; import org.elasticsearch.xpack.sql.type.DataType; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlParser.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlParser.java index 7aa3748e31eae..b7fe9178f911f 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlParser.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/SqlParser.java @@ -26,7 +26,7 @@ import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; -import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import java.util.Arrays; import java.util.BitSet; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlClearCursorAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlClearCursorAction.java index 4d47ca8c373e1..534d0459180e0 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlClearCursorAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlClearCursorAction.java @@ -12,23 +12,25 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.sql.proto.Mode; +import org.elasticsearch.xpack.sql.proto.Protocol; import java.io.IOException; import static org.elasticsearch.rest.RestRequest.Method.POST; -import static 
org.elasticsearch.xpack.sql.plugin.SqlClearCursorAction.REST_ENDPOINT; + public class RestSqlClearCursorAction extends BaseRestHandler { public RestSqlClearCursorAction(Settings settings, RestController controller) { super(settings); - controller.registerHandler(POST, REST_ENDPOINT, this); + controller.registerHandler(POST, Protocol.CLEAR_CURSOR_REST_ENDPOINT, this); } @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { SqlClearCursorRequest sqlRequest; try (XContentParser parser = request.contentOrSourceParamParser()) { - sqlRequest = SqlClearCursorRequest.fromXContent(parser, AbstractSqlRequest.Mode.fromString(request.param("mode"))); + sqlRequest = SqlClearCursorRequest.fromXContent(parser, Mode.fromString(request.param("mode"))); } return channel -> client.executeLocally(SqlClearCursorAction.INSTANCE, sqlRequest, new RestToXContentListener<>(channel)); } @@ -37,4 +39,4 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli public String getName() { return "sql_translate_action"; } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java index 9d043f855fd44..9e34a3fb2e097 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlQueryAction.java @@ -18,6 +18,8 @@ import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.RestResponseListener; +import org.elasticsearch.xpack.sql.proto.Mode; +import org.elasticsearch.xpack.sql.proto.Protocol; import org.elasticsearch.xpack.sql.session.Cursor; import org.elasticsearch.xpack.sql.session.Cursors; @@ -31,15 +33,15 @@ public class RestSqlQueryAction extends BaseRestHandler { public RestSqlQueryAction(Settings settings, RestController controller) { super(settings); - controller.registerHandler(GET, SqlQueryAction.REST_ENDPOINT, this); - controller.registerHandler(POST, SqlQueryAction.REST_ENDPOINT, this); + controller.registerHandler(GET, Protocol.SQL_QUERY_REST_ENDPOINT, this); + controller.registerHandler(POST, Protocol.SQL_QUERY_REST_ENDPOINT, this); } @Override protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { SqlQueryRequest sqlRequest; try (XContentParser parser = request.contentOrSourceParamParser()) { - sqlRequest = SqlQueryRequest.fromXContent(parser, AbstractSqlRequest.Mode.fromString(request.param("mode"))); + sqlRequest = SqlQueryRequest.fromXContent(parser, Mode.fromString(request.param("mode"))); } /* diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlTranslateAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlTranslateAction.java index 6167e4e571dff..503ee84314820 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlTranslateAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/RestSqlTranslateAction.java @@ -12,6 +12,7 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.sql.proto.Mode; import java.io.IOException; @@ -32,7 +33,7 @@ public 
RestSqlTranslateAction(Settings settings, RestController controller) { protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { SqlTranslateRequest sqlRequest; try (XContentParser parser = request.contentOrSourceParamParser()) { - sqlRequest = SqlTranslateRequest.fromXContent(parser, AbstractSqlRequest.Mode.fromString(request.param("mode"))); + sqlRequest = SqlTranslateRequest.fromXContent(parser, Mode.fromString(request.param("mode"))); } return channel -> client.executeLocally(SqlTranslateAction.INSTANCE, sqlRequest, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlLicenseChecker.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlLicenseChecker.java index 8a3ef973d6bf1..b15ff6a1ae4aa 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlLicenseChecker.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlLicenseChecker.java @@ -5,6 +5,8 @@ */ package org.elasticsearch.xpack.sql.plugin; +import org.elasticsearch.xpack.sql.proto.Mode; + import java.util.function.Consumer; /** @@ -12,16 +14,16 @@ */ public class SqlLicenseChecker { - private final Consumer<AbstractSqlRequest.Mode> checkIfSqlAllowed; + private final Consumer<Mode> checkIfSqlAllowed; - public SqlLicenseChecker(Consumer<AbstractSqlRequest.Mode> checkIfSqlAllowed) { + public SqlLicenseChecker(Consumer<Mode> checkIfSqlAllowed) { this.checkIfSqlAllowed = checkIfSqlAllowed; } /** * Throws an ElasticsearchSecurityException if the specified mode is not allowed */ - public void checkIfSqlAllowed(AbstractSqlRequest.Mode mode) { + public void checkIfSqlAllowed(Mode mode) { checkIfSqlAllowed.accept(mode); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java index 349a481cf660f..9d0cd60c23e32 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java @@ -7,6 +7,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.xpack.sql.proto.ColumnInfo; import org.elasticsearch.xpack.sql.session.Cursor; import org.elasticsearch.xpack.sql.session.Cursors; import org.elasticsearch.xpack.sql.util.StringUtils; @@ -38,17 +39,17 @@ String format(Cursor cursor, RestRequest request, SqlQueryResponse response) { final CliFormatter formatter; if (cursor instanceof CliFormatterCursor) { formatter = ((CliFormatterCursor) cursor).getCliFormatter(); - return formatter.formatWithoutHeader(response); + return formatter.formatWithoutHeader(response.rows()); } else { - formatter = new CliFormatter(response); - return formatter.formatWithHeader(response); + formatter = new CliFormatter(response.columns(), response.rows()); + return formatter.formatWithHeader(response.columns(), response.rows()); } } @Override Cursor wrapCursor(Cursor oldCursor, SqlQueryResponse response) { CliFormatter formatter = (oldCursor instanceof CliFormatterCursor) ? 
- ((CliFormatterCursor) oldCursor).getCliFormatter() : new CliFormatter(response); + ((CliFormatterCursor) oldCursor).getCliFormatter() : new CliFormatter(response.columns(), response.rows()); return CliFormatterCursor.wrap(super.wrapCursor(oldCursor, response), formatter); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java index 5b59ced7a494d..46429e2d50829 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TransportSqlQueryAction.java @@ -16,6 +16,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.sql.execution.PlanExecutor; +import org.elasticsearch.xpack.sql.proto.ColumnInfo; import org.elasticsearch.xpack.sql.session.Configuration; import org.elasticsearch.xpack.sql.session.Cursors; import org.elasticsearch.xpack.sql.session.RowSet; @@ -26,7 +27,7 @@ import java.util.List; import static java.util.Collections.unmodifiableList; -import static org.elasticsearch.xpack.sql.plugin.AbstractSqlRequest.Mode.JDBC; +import static org.elasticsearch.xpack.sql.proto.Mode.JDBC; public class TransportSqlQueryAction extends HandledTransportAction<SqlQueryRequest, SqlQueryResponse> { private final PlanExecutor planExecutor; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Configuration.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Configuration.java index 681a5eb1fbd24..ae43d4a988922 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Configuration.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Configuration.java @@ -9,16 +9,14 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.xpack.sql.plugin.AbstractSqlQueryRequest; +import org.elasticsearch.xpack.sql.proto.Protocol; import java.util.TimeZone; -// Typed object holding properties for a given action +// Typed object holding properties for a given action public class Configuration { public static final Configuration DEFAULT = new Configuration(TimeZone.getTimeZone("UTC"), - AbstractSqlQueryRequest.DEFAULT_FETCH_SIZE, - AbstractSqlQueryRequest.DEFAULT_REQUEST_TIMEOUT, - AbstractSqlQueryRequest.DEFAULT_PAGE_TIMEOUT, - null); + Protocol.FETCH_SIZE, Protocol.REQUEST_TIMEOUT, Protocol.PAGE_TIMEOUT, null); private TimeZone timeZone; private int pageSize; diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SqlSession.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SqlSession.java index 880e98c606408..65da32c3122ab 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SqlSession.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SqlSession.java @@ -21,7 +21,7 @@ import org.elasticsearch.xpack.sql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.sql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.sql.planner.Planner; -import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import org.elasticsearch.xpack.sql.rule.RuleExecutor; import java.util.List; @@ -162,4 +162,4 @@ public void sqlExecutable(String sql, List params, ActionLis public 
Configuration settings() { return settings; } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlActionIT.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlActionIT.java index b36fa811d3b25..22a7889f6247e 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlActionIT.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlActionIT.java @@ -7,10 +7,10 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.WriteRequest; -import org.elasticsearch.xpack.sql.plugin.AbstractSqlRequest.Mode; -import org.elasticsearch.xpack.sql.plugin.ColumnInfo; +import org.elasticsearch.xpack.sql.proto.ColumnInfo; import org.elasticsearch.xpack.sql.plugin.SqlQueryAction; import org.elasticsearch.xpack.sql.plugin.SqlQueryResponse; +import org.elasticsearch.xpack.sql.proto.Mode; import java.sql.JDBCType; diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CursorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CursorTests.java index 0cd8c33b11688..bac221df2e92d 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CursorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CursorTests.java @@ -14,7 +14,7 @@ import org.elasticsearch.xpack.sql.SqlException; import org.elasticsearch.xpack.sql.plugin.CliFormatter; import org.elasticsearch.xpack.sql.plugin.CliFormatterCursor; -import org.elasticsearch.xpack.sql.plugin.ColumnInfo; +import org.elasticsearch.xpack.sql.proto.ColumnInfo; import org.elasticsearch.xpack.sql.plugin.SqlQueryResponse; import org.elasticsearch.xpack.sql.session.Configuration; import org.elasticsearch.xpack.sql.session.Cursor; @@ -80,7 +80,8 @@ static Cursor randomNonEmptyCursor() { () -> { SqlQueryResponse response = createRandomSqlResponse(); if (response.columns() != null && response.rows() != null) { - return CliFormatterCursor.wrap(ScrollCursorTests.randomScrollCursor(), new CliFormatter(response)); + return CliFormatterCursor.wrap(ScrollCursorTests.randomScrollCursor(), + new CliFormatter(response.columns(), response.rows())); } else { return ScrollCursorTests.randomScrollCursor(); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/ParameterTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/ParameterTests.java index 5e35965985987..37ab5fb2b6ce3 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/ParameterTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/expression/ParameterTests.java @@ -12,7 +12,7 @@ import org.elasticsearch.xpack.sql.expression.predicate.Equals; import org.elasticsearch.xpack.sql.parser.ParsingException; import org.elasticsearch.xpack.sql.parser.SqlParser; -import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import org.elasticsearch.xpack.sql.type.DataType; import java.util.Arrays; @@ -28,7 +28,7 @@ public class ParameterTests extends ESTestCase { public void testSingleParameter() { Expression expression = new SqlParser().createExpression("a = \n?", Collections.singletonList( - new SqlTypedParamValue("foo", DataType.KEYWORD) + new SqlTypedParamValue(DataType.KEYWORD, "foo") )); logger.info(expression); assertThat(expression, 
instanceOf(Equals.class)); @@ -42,10 +42,10 @@ public void testSingleParameter() { public void testMultipleParameters() { Expression expression = new SqlParser().createExpression("(? + ? * ?) - ?", Arrays.asList( - new SqlTypedParamValue(1L, DataType.LONG), - new SqlTypedParamValue(2L, DataType.LONG), - new SqlTypedParamValue(3L, DataType.LONG), - new SqlTypedParamValue(4L, DataType.LONG) + new SqlTypedParamValue(DataType.LONG, 1L), + new SqlTypedParamValue(DataType.LONG, 2L), + new SqlTypedParamValue(DataType.LONG, 3L), + new SqlTypedParamValue(DataType.LONG, 4L) )); assertThat(expression, instanceOf(Sub.class)); Sub sub = (Sub) expression; @@ -62,9 +62,9 @@ public void testMultipleParameters() { public void testNotEnoughParameters() { ParsingException ex = expectThrows(ParsingException.class, () -> new SqlParser().createExpression("(? + ? * ?) - ?", Arrays.asList( - new SqlTypedParamValue(1L, DataType.LONG), - new SqlTypedParamValue(2L, DataType.LONG), - new SqlTypedParamValue(3L, DataType.LONG) + new SqlTypedParamValue(DataType.LONG, 1L), + new SqlTypedParamValue(DataType.LONG, 2L), + new SqlTypedParamValue(DataType.LONG, 3L) ))); assertThat(ex.getMessage(), containsString("Not enough actual parameters")); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/LikeEscapingParsingTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/LikeEscapingParsingTests.java index c94bcf0e664c4..b2abf0b680054 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/LikeEscapingParsingTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/parser/LikeEscapingParsingTests.java @@ -9,7 +9,7 @@ import org.elasticsearch.xpack.sql.expression.Expression; import org.elasticsearch.xpack.sql.expression.regex.Like; import org.elasticsearch.xpack.sql.expression.regex.LikePattern; -import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import org.elasticsearch.xpack.sql.type.DataType; import java.util.Locale; @@ -33,7 +33,7 @@ private LikePattern like(String pattern) { Expression exp = null; boolean parameterized = randomBoolean(); if (parameterized) { - exp = parser.createExpression("exp LIKE ?", singletonList(new SqlTypedParamValue(pattern, DataType.KEYWORD))); + exp = parser.createExpression("exp LIKE ?", singletonList(new SqlTypedParamValue(DataType.KEYWORD, pattern))); } else { exp = parser.createExpression(String.format(Locale.ROOT, "exp LIKE '%s'", pattern)); } @@ -63,9 +63,9 @@ public void testInvalidChar() { assertThat(error("'%string' ESCAPE '%'"), is("line 1:28: Char [%] cannot be used for escaping")); } - + public void testCannotUseStar() { assertThat(error("'|*string' ESCAPE '|'"), is("line 1:11: Invalid char [*] found in pattern [|*string] at position 1; use [%] or [_] instead")); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java index c08c423be34eb..e42ec51b425d2 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java @@ -17,7 +17,7 @@ import org.elasticsearch.xpack.sql.expression.function.FunctionRegistry; import org.elasticsearch.xpack.sql.parser.SqlParser; 
import org.elasticsearch.xpack.sql.plan.logical.command.Command; -import org.elasticsearch.xpack.sql.plugin.SqlTypedParamValue; +import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue; import org.elasticsearch.xpack.sql.session.SchemaRowSet; import org.elasticsearch.xpack.sql.session.SqlSession; import org.elasticsearch.xpack.sql.type.DataTypes; @@ -228,7 +228,7 @@ public void testSysTablesTypesEnumerationWoString() throws Exception { } private SqlTypedParamValue param(Object value) { - return new SqlTypedParamValue(value, DataTypes.fromJava(value)); + return new SqlTypedParamValue(DataTypes.fromJava(value), value); } private Tuple<Command, SqlSession> sql(String sql, List<SqlTypedParamValue> params) { diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/CliFormatterTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/CliFormatterTests.java index 1fe3c9fc89e99..d87dba3306889 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/CliFormatterTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/CliFormatterTests.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.sql.plugin; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.proto.ColumnInfo; import java.sql.JDBCType; import java.util.Arrays; @@ -23,17 +24,17 @@ public class CliFormatterTests extends ESTestCase { Arrays.asList( Arrays.asList("15charwidedata!", 1, 6.888, 12, "rabbit"), Arrays.asList("dog", 1.7976931348623157E308, 123124.888, 9912, "goat"))); - private final CliFormatter formatter = new CliFormatter(firstResponse); + private final CliFormatter formatter = new CliFormatter(firstResponse.columns(), firstResponse.rows()); /** - * Tests for {@link CliFormatter#formatWithHeader(SqlQueryResponse)}, values + * Tests for {@link CliFormatter#formatWithHeader}, values * of exactly the minimum column size, column names of exactly * the minimum column size, column headers longer than the * minimum column size, and values longer than the minimum * column size. */ public void testFormatWithHeader() { - String[] result = formatter.formatWithHeader(firstResponse).split("\n"); + String[] result = formatter.formatWithHeader(firstResponse.columns(), firstResponse.rows()).split("\n"); assertThat(result, arrayWithSize(4)); assertEquals(" foo | bar |15charwidename!|superduperwidename!!!| baz ", result[0]); assertEquals("---------------+----------------------+---------------+---------------------+---------------", result[1]); @@ -42,14 +43,14 @@ public void testFormatWithHeader() { } /** - * Tests for {@link CliFormatter#formatWithoutHeader(SqlQueryResponse)} and + * Tests for {@link CliFormatter#formatWithoutHeader} and * truncation of long columns. */ public void testFormatWithoutHeader() { - String[] result = formatter.formatWithoutHeader(new SqlQueryResponse("", null, + String[] result = formatter.formatWithoutHeader( Arrays.asList( Arrays.asList("ohnotruncateddata", 4, 1, 77, "wombat"), - Arrays.asList("dog", 2, 123124.888, 9912, "goat")))).split("\n"); + Arrays.asList("dog", 2, 123124.888, 9912, "goat"))).split("\n"); assertThat(result, arrayWithSize(2)); assertEquals("ohnotruncatedd~|4 |1 |77 |wombat ", result[0]); assertEquals("dog |2 |123124.888 |9912 |goat ", result[1]); @@ -59,9 +60,9 @@ public void testFormatWithoutHeader() { * Ensure that our estimates are perfect in at least some cases. 
*/ public void testEstimateSize() { - assertEquals(formatter.formatWithHeader(firstResponse).length(), + assertEquals(formatter.formatWithHeader(firstResponse.columns(), firstResponse.rows()).length(), formatter.estimateSize(firstResponse.rows().size() + 2)); - assertEquals(formatter.formatWithoutHeader(firstResponse).length(), + assertEquals(formatter.formatWithoutHeader(firstResponse.rows()).length(), formatter.estimateSize(firstResponse.rows().size())); } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/TextFormatTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/TextFormatTests.java index 1c6bbfa69e816..bf6ccbb225a54 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/TextFormatTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/TextFormatTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.rest.RestRequest; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.xpack.sql.proto.ColumnInfo; import java.util.ArrayList; import java.util.List; diff --git a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java index a5fc1575f484a..14bdd533c6b38 100644 --- a/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java +++ b/x-pack/qa/ml-native-tests/src/test/java/org/elasticsearch/xpack/ml/integration/ForecastIT.java @@ -206,6 +206,7 @@ public void testMemoryStatus() throws Exception { assertThat(e.getMessage(), equalTo("Cannot run forecast: Forecast cannot be executed as model memory status is not OK")); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/30399") public void testMemoryLimit() throws Exception { Detector.Builder detector = new Detector.Builder("mean", "value"); detector.setByFieldName("clientIP"); diff --git a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlSecurityIT.java b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlSecurityIT.java index 5833ef6dae5a1..f7abb6f64f63c 100644 --- a/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlSecurityIT.java +++ b/x-pack/qa/sql/security/src/test/java/org/elasticsearch/xpack/qa/sql/security/RestSqlSecurityIT.java @@ -236,11 +236,7 @@ public void testHijackScrollFails() throws Exception { createAuditLogAsserter() .expectSqlCompositeAction("test_admin", "test") .expect(true, SQL_ACTION_NAME, "full_access", empty()) - // One scroll access denied per shard - .expect("access_denied", SQL_ACTION_NAME, "full_access", "default_native", empty(), "InternalScrollSearchRequest") - .expect("access_denied", SQL_ACTION_NAME, "full_access", "default_native", empty(), "InternalScrollSearchRequest") - .expect("access_denied", SQL_ACTION_NAME, "full_access", "default_native", empty(), "InternalScrollSearchRequest") - .expect("access_denied", SQL_ACTION_NAME, "full_access", "default_native", empty(), "InternalScrollSearchRequest") + // one scroll access denied per shard .expect("access_denied", SQL_ACTION_NAME, "full_access", "default_native", empty(), "InternalScrollSearchRequest") .assertLogs(); }
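
For reference, a minimal sketch (not part of the change set) of the calling convention this refactor settles on: SqlTypedParamValue now lives in org.elasticsearch.xpack.sql.proto and takes the DataType before the value, and callers reference the standalone Mode enum instead of AbstractSqlRequest.Mode. The demo class name below is invented for illustration; the parser call mirrors the usage in ParameterTests above.

import java.util.Arrays;

import org.elasticsearch.xpack.sql.expression.Expression;
import org.elasticsearch.xpack.sql.parser.SqlParser;
import org.elasticsearch.xpack.sql.proto.SqlTypedParamValue;
import org.elasticsearch.xpack.sql.type.DataType;

// Hypothetical demo class, not part of this change set.
public class ParamOrderDemo {
    public static void main(String[] args) {
        // DataType first, value second: the argument order this change standardizes on.
        Expression expression = new SqlParser().createExpression("(? + ?) * ?", Arrays.asList(
                new SqlTypedParamValue(DataType.LONG, 1L),
                new SqlTypedParamValue(DataType.LONG, 2L),
                new SqlTypedParamValue(DataType.LONG, 3L)));
        System.out.println(expression);
    }
}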