diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestClient.java
index 5c5a82b52f438..340e14653971b 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestClient.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/IngestClient.java
@@ -24,6 +24,8 @@
import org.elasticsearch.action.ingest.GetPipelineRequest;
import org.elasticsearch.action.ingest.GetPipelineResponse;
import org.elasticsearch.action.ingest.PutPipelineRequest;
+import org.elasticsearch.action.ingest.SimulatePipelineRequest;
+import org.elasticsearch.action.ingest.SimulatePipelineResponse;
import org.elasticsearch.action.ingest.WritePipelineResponse;
import java.io.IOException;
@@ -125,4 +127,37 @@ public void deletePipelineAsync(DeletePipelineRequest request, RequestOptions op
restHighLevelClient.performRequestAsyncAndParseEntity( request, RequestConverters::deletePipeline, options,
WritePipelineResponse::fromXContent, listener, emptySet());
}
+
+ /**
+ * Simulate a pipeline on a set of documents provided in the request
+ *
+ * See
+ * <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/simulate-pipeline-api.html">
+ * Simulate Pipeline API on elastic.co</a>
+ * @param request the request
+ * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+ * @return the response
+ * @throws IOException in case there is a problem sending the request or parsing back the response
+ */
+ public SimulatePipelineResponse simulatePipeline(SimulatePipelineRequest request, RequestOptions options) throws IOException {
+ return restHighLevelClient.performRequestAndParseEntity( request, RequestConverters::simulatePipeline, options,
+ SimulatePipelineResponse::fromXContent, emptySet());
+ }
+
+ /**
+ * Asynchronously simulate a pipeline on a set of documents provided in the request
+ *
+ * See
+ * <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/simulate-pipeline-api.html">
+ * Simulate Pipeline API on elastic.co</a>
+ * @param request the request
+ * @param options the request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized
+ * @param listener the listener to be notified upon request completion
+ */
+ public void simulatePipelineAsync(SimulatePipelineRequest request,
+ RequestOptions options,
+ ActionListener<SimulatePipelineResponse> listener) {
+ restHighLevelClient.performRequestAsyncAndParseEntity( request, RequestConverters::simulatePipeline, options,
+ SimulatePipelineResponse::fromXContent, listener, emptySet());
+ }
}
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java
index c6c53501e0dd6..3b92d09b8ed56 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java
@@ -72,6 +72,7 @@
import org.elasticsearch.action.ingest.DeletePipelineRequest;
import org.elasticsearch.action.ingest.PutPipelineRequest;
import org.elasticsearch.action.ingest.GetPipelineRequest;
+import org.elasticsearch.action.ingest.SimulatePipelineRequest;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.MultiSearchRequest;
import org.elasticsearch.action.search.SearchRequest;
@@ -927,6 +928,20 @@ static Request validateQuery(ValidateQueryRequest validateQueryRequest) throws I
return request;
}
+ static Request simulatePipeline(SimulatePipelineRequest simulatePipelineRequest) throws IOException {
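+ // builds /_ingest/pipeline/{id}/_simulate, where the pipeline id is optional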
+ EndpointBuilder builder = new EndpointBuilder().addPathPartAsIs("_ingest/pipeline");
+ if (simulatePipelineRequest.getId() != null && !simulatePipelineRequest.getId().isEmpty()) {
+ builder.addPathPart(simulatePipelineRequest.getId());
+ }
+ builder.addPathPartAsIs("_simulate");
+ String endpoint = builder.build();
+ Request request = new Request(HttpPost.METHOD_NAME, endpoint);
+ Params params = new Params(request);
+ params.putParam("verbose", Boolean.toString(simulatePipelineRequest.isVerbose()));
+ request.setEntity(createEntity(simulatePipelineRequest, REQUEST_BODY_CONTENT_TYPE));
+ return request;
+ }
+
static Request getAlias(GetAliasesRequest getAliasesRequest) {
String[] indices = getAliasesRequest.indices() == null ? Strings.EMPTY_ARRAY : getAliasesRequest.indices();
String[] aliases = getAliasesRequest.aliases() == null ? Strings.EMPTY_ARRAY : getAliasesRequest.aliases();
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java
index 14fe0e01d31f9..d9d57a49b4f8a 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java
@@ -123,9 +123,7 @@ private HighLevelClient(RestClient restClient) {
}
}
- protected static XContentBuilder buildRandomXContentPipeline() throws IOException {
- XContentType xContentType = randomFrom(XContentType.values());
- XContentBuilder pipelineBuilder = XContentBuilder.builder(xContentType.xContent());
+ protected static XContentBuilder buildRandomXContentPipeline(XContentBuilder pipelineBuilder) throws IOException {
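+ // writes the pipeline into the caller-supplied builder, so tests can embed it in a larger request body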
pipelineBuilder.startObject();
{
pipelineBuilder.field(Pipeline.DESCRIPTION_KEY, "some random set of processors");
@@ -152,6 +150,12 @@ protected static XContentBuilder buildRandomXContentPipeline() throws IOExceptio
return pipelineBuilder;
}
+ protected static XContentBuilder buildRandomXContentPipeline() throws IOException {
+ XContentType xContentType = randomFrom(XContentType.values());
+ XContentBuilder pipelineBuilder = XContentBuilder.builder(xContentType.xContent());
+ return buildRandomXContentPipeline(pipelineBuilder);
+ }
+
protected static void createPipeline(String pipelineId) throws IOException {
XContentBuilder builder = buildRandomXContentPipeline();
createPipeline(new PutPipelineRequest(pipelineId, BytesReference.bytes(builder), builder.contentType()));
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IngestClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IngestClientIT.java
index ecc0d0052d415..6fd6f95059577 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IngestClientIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IngestClientIT.java
@@ -23,12 +23,22 @@
import org.elasticsearch.action.ingest.GetPipelineRequest;
import org.elasticsearch.action.ingest.GetPipelineResponse;
import org.elasticsearch.action.ingest.PutPipelineRequest;
+import org.elasticsearch.action.ingest.SimulateDocumentBaseResult;
+import org.elasticsearch.action.ingest.SimulateDocumentResult;
+import org.elasticsearch.action.ingest.SimulateDocumentVerboseResult;
+import org.elasticsearch.action.ingest.SimulatePipelineRequest;
+import org.elasticsearch.action.ingest.SimulatePipelineResponse;
import org.elasticsearch.action.ingest.WritePipelineResponse;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.ingest.PipelineConfiguration;
import java.io.IOException;
+import java.util.List;
+
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.core.IsInstanceOf.instanceOf;
public class IngestClientIT extends ESRestHighLevelClientTestCase {
@@ -80,4 +90,93 @@ public void testDeletePipeline() throws IOException {
execute(request, highLevelClient().ingest()::deletePipeline, highLevelClient().ingest()::deletePipelineAsync);
assertTrue(response.isAcknowledged());
}
+
+ public void testSimulatePipeline() throws IOException {
+ testSimulatePipeline(false, false);
+ }
+
+ public void testSimulatePipelineWithFailure() throws IOException {
+ testSimulatePipeline(false, true);
+ }
+
+ public void testSimulatePipelineVerbose() throws IOException {
+ testSimulatePipeline(true, false);
+ }
+
+ public void testSimulatePipelineVerboseWithFailure() throws IOException {
+ testSimulatePipeline(true, true);
+ }
+
+ private void testSimulatePipeline(boolean isVerbose,
+ boolean isFailure) throws IOException {
+ XContentType xContentType = randomFrom(XContentType.values());
+ XContentBuilder builder = XContentBuilder.builder(xContentType.xContent());
+ String rankValue = isFailure ? "non-int" : Integer.toString(1234);
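+ // the non-integer rank above makes the pipeline's convert processor fail when isFailure is set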
+ builder.startObject();
+ {
+ builder.field("pipeline");
+ buildRandomXContentPipeline(builder);
+ builder.startArray("docs");
+ {
+ builder.startObject()
+ .field("_index", "index")
+ .field("_type", "doc")
+ .field("_id", "doc_" + 1)
+ .startObject("_source").field("foo", "rab_" + 1).field("rank", rankValue).endObject()
+ .endObject();
+ }
+ builder.endArray();
+ }
+ builder.endObject();
+
+ SimulatePipelineRequest request = new SimulatePipelineRequest(
+ BytesReference.bytes(builder),
+ builder.contentType()
+ );
+ request.setVerbose(isVerbose);
+ SimulatePipelineResponse response =
+ execute(request, highLevelClient().ingest()::simulatePipeline, highLevelClient().ingest()::simulatePipelineAsync);
+ List<SimulateDocumentResult> results = response.getResults();
+ assertEquals(1, results.size());
+ if (isVerbose) {
+ assertThat(results.get(0), instanceOf(SimulateDocumentVerboseResult.class));
+ SimulateDocumentVerboseResult verboseResult = (SimulateDocumentVerboseResult) results.get(0);
+ assertEquals(2, verboseResult.getProcessorResults().size());
+ if (isFailure) {
+ assertNotNull(verboseResult.getProcessorResults().get(1).getFailure());
+ assertThat(verboseResult.getProcessorResults().get(1).getFailure().getMessage(),
+ containsString("unable to convert [non-int] to integer"));
+ } else {
+ assertEquals(
+ "bar",
+ verboseResult.getProcessorResults().get(0).getIngestDocument()
+ .getFieldValue("foo", String.class)
+ );
+ assertEquals(
+ Integer.valueOf(1234),
+ verboseResult.getProcessorResults().get(1).getIngestDocument()
+ .getFieldValue("rank", Integer.class)
+ );
+ }
+ } else {
+ assertThat(results.get(0), instanceOf(SimulateDocumentBaseResult.class));
+ SimulateDocumentBaseResult baseResult = (SimulateDocumentBaseResult)results.get(0);
+ if (isFailure) {
+ assertNotNull(baseResult.getFailure());
+ assertThat(baseResult.getFailure().getMessage(),
+ containsString("unable to convert [non-int] to integer"));
+ } else {
+ assertNotNull(baseResult.getIngestDocument());
+ assertEquals(
+ "bar",
+ baseResult.getIngestDocument().getFieldValue("foo", String.class)
+ );
+ assertEquals(
+ Integer.valueOf(1234),
+ baseResult.getIngestDocument()
+ .getFieldValue("rank", Integer.class)
+ );
+ }
+ }
+ }
}
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java
index e416b3bd29fe8..8035e1582c2dd 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java
@@ -75,6 +75,7 @@
import org.elasticsearch.action.ingest.DeletePipelineRequest;
import org.elasticsearch.action.ingest.GetPipelineRequest;
import org.elasticsearch.action.ingest.PutPipelineRequest;
+import org.elasticsearch.action.ingest.SimulatePipelineRequest;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.MultiSearchRequest;
import org.elasticsearch.action.search.SearchRequest;
@@ -1622,6 +1623,34 @@ public void testDeletePipeline() {
assertEquals(expectedParams, expectedRequest.getParameters());
}
+ public void testSimulatePipeline() throws IOException {
+ String pipelineId = randomBoolean() ? "some_pipeline_id" : null;
+ boolean verbose = randomBoolean();
+ String json = "{\"pipeline\":{" +
+ "\"description\":\"_description\"," +
+ "\"processors\":[{\"set\":{\"field\":\"field2\",\"value\":\"_value\"}}]}," +
+ "\"docs\":[{\"_index\":\"index\",\"_type\":\"_doc\",\"_id\":\"id\",\"_source\":{\"foo\":\"rab\"}}]}";
+ SimulatePipelineRequest request = new SimulatePipelineRequest(
+ new BytesArray(json.getBytes(StandardCharsets.UTF_8)),
+ XContentType.JSON
+ );
+ request.setId(pipelineId);
+ request.setVerbose(verbose);
+ Map<String, String> expectedParams = new HashMap<>();
+ expectedParams.put("verbose", Boolean.toString(verbose));
+
+ Request expectedRequest = RequestConverters.simulatePipeline(request);
+ StringJoiner endpoint = new StringJoiner("/", "/", "");
+ endpoint.add("_ingest/pipeline");
+ if (pipelineId != null && !pipelineId.isEmpty()) {
+ endpoint.add(pipelineId);
+ }
+ endpoint.add("_simulate");
+ assertEquals(endpoint.toString(), expectedRequest.getEndpoint());
+ assertEquals(HttpPost.METHOD_NAME, expectedRequest.getMethod());
+ assertEquals(expectedParams, expectedRequest.getParameters());
+ assertToXContentBody(request, expectedRequest.getEntity());
+ }
+
public void testClusterHealth() {
ClusterHealthRequest healthRequest = new ClusterHealthRequest();
Map<String, String> expectedParams = new HashMap<>();
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java
index 8b8998baff581..c706a3a03f203 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CRUDDocumentationIT.java
@@ -114,7 +114,7 @@ public void testIndex() throws Exception {
.source(jsonMap); // <1>
//end::index-request-map
IndexResponse indexResponse = client.index(indexRequest, RequestOptions.DEFAULT);
- assertEquals(indexResponse.getResult(), DocWriteResponse.Result.CREATED);
+ assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult());
}
{
//tag::index-request-xcontent
@@ -130,7 +130,7 @@ public void testIndex() throws Exception {
.source(builder); // <1>
//end::index-request-xcontent
IndexResponse indexResponse = client.index(indexRequest, RequestOptions.DEFAULT);
- assertEquals(indexResponse.getResult(), DocWriteResponse.Result.UPDATED);
+ assertEquals(DocWriteResponse.Result.UPDATED, indexResponse.getResult());
}
{
//tag::index-request-shortcut
@@ -140,7 +140,7 @@ public void testIndex() throws Exception {
"message", "trying out Elasticsearch"); // <1>
//end::index-request-shortcut
IndexResponse indexResponse = client.index(indexRequest, RequestOptions.DEFAULT);
- assertEquals(indexResponse.getResult(), DocWriteResponse.Result.UPDATED);
+ assertEquals(DocWriteResponse.Result.UPDATED, indexResponse.getResult());
}
{
//tag::index-request-string
@@ -159,7 +159,7 @@ public void testIndex() throws Exception {
// tag::index-execute
IndexResponse indexResponse = client.index(request, RequestOptions.DEFAULT);
// end::index-execute
- assertEquals(indexResponse.getResult(), DocWriteResponse.Result.UPDATED);
+ assertEquals(DocWriteResponse.Result.UPDATED, indexResponse.getResult());
// tag::index-response
String index = indexResponse.getIndex();
@@ -273,7 +273,7 @@ public void testUpdate() throws Exception {
{
IndexRequest indexRequest = new IndexRequest("posts", "doc", "1").source("field", 0);
IndexResponse indexResponse = client.index(indexRequest, RequestOptions.DEFAULT);
- assertSame(indexResponse.status(), RestStatus.CREATED);
+ assertSame(RestStatus.CREATED, indexResponse.status());
Request request = new Request("POST", "/_scripts/increment-field");
request.setJsonEntity(Strings.toString(JsonXContent.contentBuilder()
@@ -284,7 +284,7 @@ public void testUpdate() throws Exception {
.endObject()
.endObject()));
Response response = client().performRequest(request);
- assertEquals(response.getStatusLine().getStatusCode(), RestStatus.OK.getStatus());
+ assertEquals(RestStatus.OK.getStatus(), response.getStatusLine().getStatusCode());
}
{
//tag::update-request
@@ -302,7 +302,7 @@ public void testUpdate() throws Exception {
request.script(inline); // <3>
//end::update-request-with-inline-script
UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT);
- assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED);
+ assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult());
assertEquals(4, updateResponse.getGetResult().getSource().get("field"));
request = new UpdateRequest("posts", "doc", "1").fetchSource(true);
@@ -312,7 +312,7 @@ public void testUpdate() throws Exception {
request.script(stored); // <2>
//end::update-request-with-stored-script
updateResponse = client.update(request, RequestOptions.DEFAULT);
- assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED);
+ assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult());
assertEquals(8, updateResponse.getGetResult().getSource().get("field"));
}
{
@@ -324,7 +324,7 @@ public void testUpdate() throws Exception {
.doc(jsonMap); // <1>
//end::update-request-with-doc-as-map
UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT);
- assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED);
+ assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult());
}
{
//tag::update-request-with-doc-as-xcontent
@@ -339,7 +339,7 @@ public void testUpdate() throws Exception {
.doc(builder); // <1>
//end::update-request-with-doc-as-xcontent
UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT);
- assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED);
+ assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult());
}
{
//tag::update-request-shortcut
@@ -348,7 +348,7 @@ public void testUpdate() throws Exception {
"reason", "daily update"); // <1>
//end::update-request-shortcut
UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT);
- assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED);
+ assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult());
}
{
//tag::update-request-with-doc-as-string
@@ -363,7 +363,7 @@ public void testUpdate() throws Exception {
// tag::update-execute
UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT);
// end::update-execute
- assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED);
+ assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult());
// tag::update-response
String index = updateResponse.getIndex();
@@ -438,7 +438,7 @@ public void testUpdate() throws Exception {
request.fetchSource(true); // <1>
//end::update-request-no-source
UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT);
- assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED);
+ assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult());
assertNotNull(updateResponse.getGetResult());
assertEquals(3, updateResponse.getGetResult().sourceAsMap().size());
}
@@ -450,7 +450,7 @@ public void testUpdate() throws Exception {
request.fetchSource(new FetchSourceContext(true, includes, excludes)); // <1>
//end::update-request-source-include
UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT);
- assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED);
+ assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult());
Map<String, Object> sourceAsMap = updateResponse.getGetResult().sourceAsMap();
assertEquals(2, sourceAsMap.size());
assertEquals("source includes", sourceAsMap.get("reason"));
@@ -464,7 +464,7 @@ public void testUpdate() throws Exception {
request.fetchSource(new FetchSourceContext(true, includes, excludes)); // <1>
//end::update-request-source-exclude
UpdateResponse updateResponse = client.update(request, RequestOptions.DEFAULT);
- assertEquals(updateResponse.getResult(), DocWriteResponse.Result.UPDATED);
+ assertEquals(DocWriteResponse.Result.UPDATED, updateResponse.getResult());
Map<String, Object> sourceAsMap = updateResponse.getGetResult().sourceAsMap();
assertEquals(2, sourceAsMap.size());
assertEquals("source excludes", sourceAsMap.get("reason"));
@@ -545,7 +545,7 @@ public void testDelete() throws Exception {
{
IndexRequest indexRequest = new IndexRequest("posts", "doc", "1").source("field", "value");
IndexResponse indexResponse = client.index(indexRequest, RequestOptions.DEFAULT);
- assertSame(indexResponse.status(), RestStatus.CREATED);
+ assertSame(RestStatus.CREATED, indexResponse.status());
}
{
@@ -559,7 +559,7 @@ public void testDelete() throws Exception {
// tag::delete-execute
DeleteResponse deleteResponse = client.delete(request, RequestOptions.DEFAULT);
// end::delete-execute
- assertSame(deleteResponse.getResult(), DocWriteResponse.Result.DELETED);
+ assertSame(DocWriteResponse.Result.DELETED, deleteResponse.getResult());
// tag::delete-response
String index = deleteResponse.getIndex();
@@ -615,7 +615,7 @@ public void testDelete() throws Exception {
{
IndexResponse indexResponse = client.index(new IndexRequest("posts", "doc", "1").source("field", "value")
, RequestOptions.DEFAULT);
- assertSame(indexResponse.status(), RestStatus.CREATED);
+ assertSame(RestStatus.CREATED, indexResponse.status());
// tag::delete-conflict
try {
@@ -631,7 +631,7 @@ public void testDelete() throws Exception {
{
IndexResponse indexResponse = client.index(new IndexRequest("posts", "doc", "async").source("field", "value"),
RequestOptions.DEFAULT);
- assertSame(indexResponse.status(), RestStatus.CREATED);
+ assertSame(RestStatus.CREATED, indexResponse.status());
DeleteRequest request = new DeleteRequest("posts", "doc", "async");
@@ -676,7 +676,7 @@ public void testBulk() throws Exception {
// tag::bulk-execute
BulkResponse bulkResponse = client.bulk(request, RequestOptions.DEFAULT);
// end::bulk-execute
- assertSame(bulkResponse.status(), RestStatus.OK);
+ assertSame(RestStatus.OK, bulkResponse.status());
assertFalse(bulkResponse.hasFailures());
}
{
@@ -689,7 +689,7 @@ public void testBulk() throws Exception {
.source(XContentType.JSON,"field", "baz"));
// end::bulk-request-with-mixed-operations
BulkResponse bulkResponse = client.bulk(request, RequestOptions.DEFAULT);
- assertSame(bulkResponse.status(), RestStatus.OK);
+ assertSame(RestStatus.OK, bulkResponse.status());
assertFalse(bulkResponse.hasFailures());
// tag::bulk-response
@@ -788,7 +788,7 @@ public void testGet() throws Exception {
"postDate", new Date(),
"message", "trying out Elasticsearch");
IndexResponse indexResponse = client.index(indexRequest, RequestOptions.DEFAULT);
- assertEquals(indexResponse.getResult(), DocWriteResponse.Result.CREATED);
+ assertEquals(DocWriteResponse.Result.CREATED, indexResponse.getResult());
}
{
//tag::get-request
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IngestClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IngestClientDocumentationIT.java
index f5bdc9f2f3ee5..c53ec2b5d7cc7 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IngestClientDocumentationIT.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/IngestClientDocumentationIT.java
@@ -25,6 +25,12 @@
import org.elasticsearch.action.ingest.GetPipelineRequest;
import org.elasticsearch.action.ingest.GetPipelineResponse;
import org.elasticsearch.action.ingest.PutPipelineRequest;
+import org.elasticsearch.action.ingest.SimulateDocumentBaseResult;
+import org.elasticsearch.action.ingest.SimulateDocumentResult;
+import org.elasticsearch.action.ingest.SimulateDocumentVerboseResult;
+import org.elasticsearch.action.ingest.SimulatePipelineRequest;
+import org.elasticsearch.action.ingest.SimulatePipelineResponse;
+import org.elasticsearch.action.ingest.SimulateProcessorResult;
import org.elasticsearch.action.ingest.WritePipelineResponse;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.RequestOptions;
@@ -277,4 +283,109 @@ public void onFailure(Exception e) {
}
}
+ public void testSimulatePipeline() throws IOException {
+ RestHighLevelClient client = highLevelClient();
+
+ {
+ // tag::simulate-pipeline-request
+ String source =
+ "{\"" +
+ "pipeline\":{" +
+ "\"description\":\"_description\"," +
+ "\"processors\":[{\"set\":{\"field\":\"field2\",\"value\":\"_value\"}}]" +
+ "}," +
+ "\"docs\":[" +
+ "{\"_index\":\"index\",\"_type\":\"_doc\",\"_id\":\"id\",\"_source\":{\"foo\":\"bar\"}}," +
+ "{\"_index\":\"index\",\"_type\":\"_doc\",\"_id\":\"id\",\"_source\":{\"foo\":\"rab\"}}" +
+ "]" +
+ "}";
+ SimulatePipelineRequest request = new SimulatePipelineRequest(
+ new BytesArray(source.getBytes(StandardCharsets.UTF_8)), // <1>
+ XContentType.JSON // <2>
+ );
+ // end::simulate-pipeline-request
+
+ // tag::simulate-pipeline-request-pipeline-id
+ request.setId("my-pipeline-id"); // <1>
+ // end::simulate-pipeline-request-pipeline-id
+
+ // For testing we set this back to null
+ request.setId(null);
+
+ // tag::simulate-pipeline-request-verbose
+ request.setVerbose(true); // <1>
+ // end::simulate-pipeline-request-verbose
+
+ // tag::simulate-pipeline-execute
+ SimulatePipelineResponse response = client.ingest().simulatePipeline(request, RequestOptions.DEFAULT); // <1>
+ // end::simulate-pipeline-execute
+
+ // tag::simulate-pipeline-response
+ for (SimulateDocumentResult result: response.getResults()) { // <1>
+ if (request.isVerbose()) {
+ assert result instanceof SimulateDocumentVerboseResult;
+ SimulateDocumentVerboseResult verboseResult = (SimulateDocumentVerboseResult)result; // <2>
+ for (SimulateProcessorResult processorResult: verboseResult.getProcessorResults()) { // <3>
+ processorResult.getIngestDocument(); // <4>
+ processorResult.getFailure(); // <5>
+ }
+ } else {
+ assert result instanceof SimulateDocumentBaseResult;
+ SimulateDocumentBaseResult baseResult = (SimulateDocumentBaseResult)result; // <6>
+ baseResult.getIngestDocument(); // <7>
+ baseResult.getFailure(); // <8>
+ }
+ }
+ // end::simulate-pipeline-response
+ assert response.getResults().size() > 0;
+ }
+ }
+
+ public void testSimulatePipelineAsync() throws Exception {
+ RestHighLevelClient client = highLevelClient();
+
+ {
+ String source =
+ "{\"" +
+ "pipeline\":{" +
+ "\"description\":\"_description\"," +
+ "\"processors\":[{\"set\":{\"field\":\"field2\",\"value\":\"_value\"}}]" +
+ "}," +
+ "\"docs\":[" +
+ "{\"_index\":\"index\",\"_type\":\"_doc\",\"_id\":\"id\",\"_source\":{\"foo\":\"bar\"}}," +
+ "{\"_index\":\"index\",\"_type\":\"_doc\",\"_id\":\"id\",\"_source\":{\"foo\":\"rab\"}}" +
+ "]" +
+ "}";
+ SimulatePipelineRequest request = new SimulatePipelineRequest(
+ new BytesArray(source.getBytes(StandardCharsets.UTF_8)),
+ XContentType.JSON
+ );
+
+ // tag::simulate-pipeline-execute-listener
+ ActionListener<SimulatePipelineResponse> listener =
+ new ActionListener<SimulatePipelineResponse>() {
+ @Override
+ public void onResponse(SimulatePipelineResponse response) {
+ // <1>
+ }
+
+ @Override
+ public void onFailure(Exception e) {
+ // <2>
+ }
+ };
+ // end::simulate-pipeline-execute-listener
+
+ // Replace the empty listener by a blocking listener in test
+ final CountDownLatch latch = new CountDownLatch(1);
+ listener = new LatchedActionListener<>(listener, latch);
+
+ // tag::simulate-pipeline-execute-async
+ client.ingest().simulatePipelineAsync(request, RequestOptions.DEFAULT, listener); // <1>
+ // end::simulate-pipeline-execute-async
+
+ assertTrue(latch.await(30L, TimeUnit.SECONDS));
+ }
+ }
+
}
diff --git a/distribution/packages/src/common/scripts/preinst b/distribution/packages/src/common/scripts/preinst
index 2aec2172ad856..22f2405af3c2b 100644
--- a/distribution/packages/src/common/scripts/preinst
+++ b/distribution/packages/src/common/scripts/preinst
@@ -9,6 +9,18 @@
# $1=1 : indicates a new install
# $1=2 : indicates an upgrade
+# Check for java at preinst time, since postinst fails if it is missing
+if [ -x "$JAVA_HOME/bin/java" ]; then
+ JAVA="$JAVA_HOME/bin/java"
+else
+ JAVA=`which java`
+fi
+
+if [ -z "$JAVA" ]; then
+ echo "could not find java; set JAVA_HOME or ensure java is in PATH"
+ exit 1
+fi
+
case "$1" in
# Debian ####################################################
diff --git a/docs/java-rest/high-level/ingest/simulate_pipeline.asciidoc b/docs/java-rest/high-level/ingest/simulate_pipeline.asciidoc
new file mode 100644
index 0000000000000..9d1bbd06ceb26
--- /dev/null
+++ b/docs/java-rest/high-level/ingest/simulate_pipeline.asciidoc
@@ -0,0 +1,90 @@
+[[java-rest-high-ingest-simulate-pipeline]]
+=== Simulate Pipeline API
+
+[[java-rest-high-ingest-simulate-pipeline-request]]
+==== Simulate Pipeline Request
+
+A `SimulatePipelineRequest` requires a source and an `XContentType`. The source consists
+of the request body. See the https://www.elastic.co/guide/en/elasticsearch/reference/master/simulate-pipeline-api.html[docs]
+for more details on the request body.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IngestClientDocumentationIT.java[simulate-pipeline-request]
+--------------------------------------------------
+<1> The request body as a `BytesArray`.
+<2> The XContentType for the request body supplied above.
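+
+As an alternative to a raw JSON string, the same body can be assembled
+programmatically. The following is a minimal sketch using an `XContentBuilder`
+(the field values mirror the example above and are purely illustrative):
+
+["source","java"]
+--------------------------------------------------
+XContentBuilder body = XContentFactory.jsonBuilder();
+body.startObject();
+{
+    body.startObject("pipeline");
+    {
+        body.field("description", "_description");
+        body.startArray("processors");
+        body.startObject().startObject("set")
+                .field("field", "field2")
+                .field("value", "_value")
+            .endObject().endObject();
+        body.endArray();
+    }
+    body.endObject();
+    body.startArray("docs");
+    body.startObject()
+            .field("_index", "index")
+            .field("_type", "_doc")
+            .field("_id", "id")
+            .startObject("_source").field("foo", "bar").endObject()
+        .endObject();
+    body.endArray();
+}
+body.endObject();
+SimulatePipelineRequest request = new SimulatePipelineRequest(
+    BytesReference.bytes(body), body.contentType());
+--------------------------------------------------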
+
+==== Optional arguments
+The following arguments can optionally be provided:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IngestClientDocumentationIT.java[simulate-pipeline-request-pipeline-id]
+--------------------------------------------------
+<1> You can either specify an existing pipeline to execute against the provided documents, or supply a
+pipeline definition in the body of the request. This option sets the id for an existing pipeline.
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IngestClientDocumentationIT.java[simulate-pipeline-request-verbose]
+--------------------------------------------------
+<1> To see the intermediate results of each processor in the simulate request, you can add the verbose parameter
+to the request.
+
+[[java-rest-high-ingest-simulate-pipeline-sync]]
+==== Synchronous Execution
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IngestClientDocumentationIT.java[simulate-pipeline-execute]
+--------------------------------------------------
+<1> Execute the request and get back the response in a `SimulatePipelineResponse` object.
+
+[[java-rest-high-ingest-simulate-pipeline-async]]
+==== Asynchronous Execution
+
+The asynchronous execution of a simulate pipeline request requires both the `SimulatePipelineRequest`
+instance and an `ActionListener` instance to be passed to the asynchronous
+method:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IngestClientDocumentationIT.java[simulate-pipeline-execute-async]
+--------------------------------------------------
+<1> The `SimulatePipelineRequest` to execute and the `ActionListener` to use when
+the execution completes
+
+The asynchronous method does not block and returns immediately. Once it is
+completed the `ActionListener` is called back using the `onResponse` method
+if the execution successfully completed or using the `onFailure` method if
+it failed.
+
+A typical listener for `SimulatePipelineResponse` looks like:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IngestClientDocumentationIT.java[simulate-pipeline-execute-listener]
+--------------------------------------------------
+<1> Called when the execution is successfully completed. The response is
+provided as an argument
+<2> Called in case of failure. The raised exception is provided as an argument
+
+[[java-rest-high-ingest-simulate-pipeline-response]]
+==== Simulate Pipeline Response
+
+The returned `SimulatePipelineResponse` allows retrieving information about the
+executed operation as follows:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/IngestClientDocumentationIT.java[simulate-pipeline-response]
+--------------------------------------------------
+<1> Get the results for each of the provided documents, as a `List` of `SimulateDocumentResult` instances.
+<2> If the request was in verbose mode, cast the result to `SimulateDocumentVerboseResult`.
+<3> Check the result after each processor is applied.
+<4> Get the ingest document for the result obtained in 3.
+<5> Or get the failure for the result obtained in 3.
+<6> Get the result as `SimulateDocumentBaseResult` if the result was not verbose.
+<7> Get the ingest document for the result obtained in 6.
+<8> Or get the failure for the result obtained in 6.
diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc
index 3caab5100ca0f..9ed54db817551 100644
--- a/docs/java-rest/high-level/supported-apis.asciidoc
+++ b/docs/java-rest/high-level/supported-apis.asciidoc
@@ -125,10 +125,12 @@ The Java High Level REST Client supports the following Ingest APIs:
* <<java-rest-high-ingest-put-pipeline>>
* <<java-rest-high-ingest-get-pipeline>>
* <<java-rest-high-ingest-delete-pipeline>>
+* <<java-rest-high-ingest-simulate-pipeline>>
include::ingest/put_pipeline.asciidoc[]
include::ingest/get_pipeline.asciidoc[]
include::ingest/delete_pipeline.asciidoc[]
+include::ingest/simulate_pipeline.asciidoc[]
== Snapshot APIs
diff --git a/docs/reference/ingest/ingest-node.asciidoc b/docs/reference/ingest/ingest-node.asciidoc
index a7aaf0177f5b9..8876cf21b02c1 100644
--- a/docs/reference/ingest/ingest-node.asciidoc
+++ b/docs/reference/ingest/ingest-node.asciidoc
@@ -1075,9 +1075,10 @@ then it aborts the execution and leaves the array unmodified.
.Foreach Options
[options="header"]
|======
-| Name | Required | Default | Description
-| `field` | yes | - | The array field
-| `processor` | yes | - | The processor to execute against each field
+| Name | Required | Default | Description
+| `field` | yes | - | The array field
+| `processor` | yes | - | The processor to execute against each field
+| `ignore_missing` | no | false | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document
|======
Assume the following document:
diff --git a/docs/reference/sql/appendix/index.asciidoc b/docs/reference/sql/appendix/index.asciidoc
new file mode 100644
index 0000000000000..b00176a8a3f67
--- /dev/null
+++ b/docs/reference/sql/appendix/index.asciidoc
@@ -0,0 +1 @@
+include::syntax-reserved.asciidoc[]
\ No newline at end of file
diff --git a/docs/reference/sql/language/reserved.asciidoc b/docs/reference/sql/appendix/syntax-reserved.asciidoc
similarity index 98%
rename from docs/reference/sql/language/reserved.asciidoc
rename to docs/reference/sql/appendix/syntax-reserved.asciidoc
index 1ae551cc43c08..7a502d6eea939 100644
--- a/docs/reference/sql/language/reserved.asciidoc
+++ b/docs/reference/sql/appendix/syntax-reserved.asciidoc
@@ -1,7 +1,8 @@
[role="xpack"]
[testenv="basic"]
-[[sql-spec-reserved]]
-=== Reserved Keywords
+[appendix]
+[[sql-syntax-reserved]]
+= Reserved Keywords
Table with reserved keywords that need to be quoted. It also provides an example to make it more obvious.
diff --git a/docs/reference/sql/concepts.asciidoc b/docs/reference/sql/concepts.asciidoc
new file mode 100644
index 0000000000000..1dc23e391fab1
--- /dev/null
+++ b/docs/reference/sql/concepts.asciidoc
@@ -0,0 +1,65 @@
+[role="xpack"]
+[testenv="basic"]
+[[sql-concepts]]
+== Conventions and Terminology
+
+For clarity, it is important to establish the meaning behind certain words, as the same wording might convey different meanings to different readers, depending on one's familiarity with SQL versus {es}.
+
+NOTE: This documentation, while trying to be complete, does assume the reader has a _basic_ understanding of {es} and/or SQL. If that is not the case, please continue reading the documentation, but take notes and pursue the topics that are unclear, either through the main {es} documentation or through the plethora of SQL material available in the open (there are simply too many excellent resources to enumerate here).
+
+As a general rule, {es-sql}, as the name indicates, provides a SQL interface to {es}. As such, it follows SQL terminology and conventions first, whenever possible. However, the backing engine itself is {es}, for which {es-sql} was purposely created, hence why features or concepts that are not available, or cannot be mapped correctly, in SQL appear
+in {es-sql}.
+Last but not least, {es-sql} tries to obey the https://en.wikipedia.org/wiki/Principle_of_least_astonishment[principle of least surprise], though as all things in the world, everything is relative.
+
+=== Mapping concepts across SQL and {es}
+
+While SQL and {es} have different terms for the way the data is organized (and different semantics), essentially their purpose is the same.
+
+So let's start from the bottom; these roughly are:
+
+[cols="1,1,5", options="header"]
+|===
+|SQL
+|{es}
+|Description
+
+|`column`
+|`field`
+|In both cases, at the lowest level, data is stored in _named_ entries, of a variety of <<mapping-types,data types>>, containing _one_ value. SQL calls such an entry a _column_ while {es} a _field_.
+Notice that in {es} a field can contain _multiple_ values of the same type (essentially a list) while in SQL, a _column_ can contain _exactly_ one value of said type.
+{es-sql} will do its best to preserve the SQL semantic and, depending on the query, reject those that return fields with more than one value.
+
+|`row`
+|`document`
+|++Column++s and ++field++s do _not_ exist by themselves; they are part of a `row` or a `document`. The two have slightly different semantics: a `row` tends to be _strict_ (and have more enforcements) while a `document` tends to be a bit more flexible or loose (while still having a structure).
+
+|`table`
+|`index`
+|The target against which queries get executed, whether in SQL or {es}.
+
+|`schema`
+|_implicit_
+|In RDBMS, `schema` is mainly a namespace of tables and typically used as a security boundary. {es} does not provide an equivalent concept for it. However, when security is enabled, {es} automatically applies the security enforcement so that a role sees only the data it is allowed to (in SQL jargon, its _schema_).
+
+|`catalog` or `database`
+|`cluster` instance
+|In SQL, `catalog` or `database` are used interchangeably and represent a set of schemas, that is, a number of tables.
+In {es} the set of indices available are grouped in a `cluster`. The semantics also differ a bit; a `database` is essentially yet another namespace (which can have some implications on the way data is stored) while an {es} `cluster` is a runtime instance, or rather a set of at least one {es} instance (typically running distributed).
+In practice this means that while in SQL one can potentially have multiple catalogs inside an instance, in {es} one is restricted to only _one_.
+
+|`cluster`
+|`cluster` (federated)
+|Traditionally in SQL, _cluster_ refers to a single RDBMS instance which contains a number of ++catalog++s or ++database++s (see above). The same word can be reused inside {es} as well, however its semantics are clarified a bit.
+
+While an RDBMS tends to have only one running instance, on a single machine (_not_ distributed), {es} goes the opposite way and, by default, is distributed and multi-instance.
+
+Furthermore, an {es} `cluster` can be connected to other ++cluster++s in a _federated_ fashion, thus `cluster` means:
+
+single cluster::
+Multiple {es} instances typically distributed across machines, running within the same namespace.
+multiple clusters::
+Multiple clusters, each with its own namespace, connected to each other in a federated setup (see <<modules-cross-cluster-search,Cross cluster search>>).
+
+|===
+
+As one can see, while the mapping between the concepts is not exactly one to one and the semantics somewhat different, there are more things in common than different. In fact, thanks to SQL's declarative nature, many concepts can move across {es} transparently and the terminology of the two is likely to be used interchangeably throughout the rest of the material.
\ No newline at end of file
diff --git a/docs/reference/sql/endpoints/cli.asciidoc b/docs/reference/sql/endpoints/cli.asciidoc
index 206d687d97a5f..0908c2344bb15 100644
--- a/docs/reference/sql/endpoints/cli.asciidoc
+++ b/docs/reference/sql/endpoints/cli.asciidoc
@@ -38,18 +38,3 @@ James S.A. Corey |Leviathan Wakes |561 |1306972800000
--------------------------------------------------
// TODO it'd be lovely to be able to assert that this is correct but
// that is probably more work then it is worth right now.
-
-[[sql-cli-permissions]]
-[NOTE]
-===============================
-If you are using Security you need to add a few permissions to
-users so they can run SQL. To run SQL using the CLI a user needs
-`read`, `indices:admin/get`, and `cluster:monitor/main`. The
-following example configures a role that can run SQL in the CLI
-for the `test` and `bort` indices:
-
-["source","yaml",subs="attributes,callouts,macros"]
---------------------------------------------------
-include-tagged::{sql-tests}/security/roles.yml[cli_jdbc]
---------------------------------------------------
-===============================
diff --git a/docs/reference/sql/endpoints/jdbc.asciidoc b/docs/reference/sql/endpoints/jdbc.asciidoc
index 2125cc2ee839c..6a8793f7e24e2 100644
--- a/docs/reference/sql/endpoints/jdbc.asciidoc
+++ b/docs/reference/sql/endpoints/jdbc.asciidoc
@@ -37,11 +37,11 @@ from `artifacts.elastic.co/maven` by adding it to the repositories list:
[float]
=== Setup
-The driver main class is `org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcDriver`. Note the driver
-also implements the JDBC 4.0 +Service Provider+ mechanism meaning it is registerd automatically
+The driver main class is `org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcDriver`.
+Note the driver implements the JDBC 4.0 +Service Provider+ mechanism meaning it is registered automatically
as long as it's available in the classpath.
-Once registered, the driver expects the following syntax as an URL:
+Once registered, the driver understands the following syntax as a URL:
["source","text",subs="attributes"]
----
@@ -121,12 +121,12 @@ Query timeout (in seconds). That is the maximum amount of time waiting for a que
To put all of it together, the following URL:
-["source","text",subs="attributes"]
+["source","text"]
----
jdbc:es://http://server:3456/timezone=UTC&page.size=250
----
-Opens up a {es-jdbc} connection to `server` on port `3456`, setting the JDBC timezone to `UTC` and its pagesize to `250` entries.
+Opens up a {es-sql} connection to `server` on port `3456`, setting the JDBC connection timezone to `UTC` and its page size to `250` entries.
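+
+For example, here is a minimal sketch of opening such a connection through the
+plain JDBC API (the URL reuses the example above; the query and the index name
+are illustrative):
+
+["source","java"]
+--------------------------------------------------
+String url = "jdbc:es://http://server:3456/timezone=UTC&page.size=250";
+// no Class.forName(...) call is needed: the driver registers itself
+// through the JDBC 4.0 Service Provider mechanism mentioned above
+try (Connection connection = DriverManager.getConnection(url);
+     Statement statement = connection.createStatement();
+     ResultSet results = statement.executeQuery("SELECT name FROM library")) {
+    while (results.next()) {
+        // consume each row
+    }
+}
+--------------------------------------------------
+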
=== API usage
@@ -176,20 +176,4 @@ connection. For example:
["source","java",subs="attributes,callouts,macros"]
--------------------------------------------------
include-tagged::{jdbc-tests}/SimpleExampleTestCase.java[simple_example]
---------------------------------------------------
-
-[[sql-jdbc-permissions]]
-[NOTE]
-===============================
-If you are using Security you need to add a few permissions to
-users so they can run SQL. To run SQL a user needs `read` and
-`indices:admin/get`. Some parts of the API require
-`cluster:monitor/main`. The following example configures a
-role that can run SQL in JDBC querying the `test` and `bort`
-indices:
-
-["source","yaml",subs="attributes,callouts,macros"]
---------------------------------------------------
-include-tagged::{sql-tests}/security/roles.yml[cli_jdbc]
---------------------------------------------------
-===============================
+--------------------------------------------------
\ No newline at end of file
diff --git a/docs/reference/sql/endpoints/rest.asciidoc b/docs/reference/sql/endpoints/rest.asciidoc
index ef4c08ba483bc..f33189303e682 100644
--- a/docs/reference/sql/endpoints/rest.asciidoc
+++ b/docs/reference/sql/endpoints/rest.asciidoc
@@ -188,17 +188,3 @@ or fewer results though. `time_zone` is the time zone to use for date
functions and date parsing. `time_zone` defaults to `utc` and can take
any values documented
http://www.joda.org/joda-time/apidocs/org/joda/time/DateTimeZone.html[here].
-
-[[sql-rest-permissions]]
-[NOTE]
-===============================
-If you are using Security you need to add a few permissions to
-users so they can run SQL. To run SQL a user needs `read` and
-`indices:admin/get`. The following example configures a role
-that can run SQL against the `test` and `bort` indices:
-
-["source","yaml",subs="attributes,callouts,macros"]
---------------------------------------------------
-include-tagged::{sql-tests}/security/roles.yml[rest]
---------------------------------------------------
-===============================
diff --git a/docs/reference/sql/endpoints/translate.asciidoc b/docs/reference/sql/endpoints/translate.asciidoc
index 3f2f87ab2e2f5..db450b5f914c8 100644
--- a/docs/reference/sql/endpoints/translate.asciidoc
+++ b/docs/reference/sql/endpoints/translate.asciidoc
@@ -58,18 +58,3 @@ the normal <> API.
The request body accepts all of the fields that
the <<sql-rest,SQL REST API>> accepts, except `cursor`.
-
-[[sql-translate-permissions]]
-[NOTE]
-===============================
-If you are using Security you need to add a few permissions to
-users so they can run translate SQL. To translate SQL a user
-needs `read` and `indices:admin/get`. The following example
-configures a role that can run SQL against the `test` and
-`bort` indices:
-
-["source","yaml",subs="attributes,callouts,macros"]
---------------------------------------------------
-include-tagged::{sql-tests}/security/roles.yml[rest]
---------------------------------------------------
-===============================
diff --git a/docs/reference/sql/functions/index.asciidoc b/docs/reference/sql/functions/index.asciidoc
index 653b5a92fec52..93d201a182828 100644
--- a/docs/reference/sql/functions/index.asciidoc
+++ b/docs/reference/sql/functions/index.asciidoc
@@ -350,6 +350,25 @@ include-tagged::{sql-specs}/datetime.csv-spec[minuteOfHour]
include-tagged::{sql-specs}/datetime.csv-spec[secondOfMinute]
--------------------------------------------------
+* Extract
+
+As an alternative, one can use `EXTRACT` to extract fields from datetimes.
+You can run any <<sql-functions-datetime,datetime function>>
+with `EXTRACT(<datetime_function> FROM <expression>)`. So
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/datetime.csv-spec[extractDayOfYear]
+--------------------------------------------------
+
+is equivalent to
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/datetime.csv-spec[dayOfYear]
+--------------------------------------------------
+
+
[[sql-functions-aggregate]]
=== Aggregate Functions
diff --git a/docs/reference/sql/index.asciidoc b/docs/reference/sql/index.asciidoc
index 3d69a240a9a6f..33b9da9fab93d 100644
--- a/docs/reference/sql/index.asciidoc
+++ b/docs/reference/sql/index.asciidoc
@@ -20,7 +20,11 @@ indices and return results in tabular format.
<<sql-overview>>::
Overview of {es-sql} and its features.
<<sql-getting-started>>::
- Start using SQL right away in {es}
+ Start using SQL right away in {es}.
+<<sql-concepts>>::
+ Language conventions across SQL and {es}.
+<<sql-security>>::
+ Securing {es-sql} and {es}.
<<sql-rest>>::
Accepts SQL in a JSON document, executes it, and returns the
results.
@@ -32,18 +36,20 @@ indices and return results in tabular format.
SQL and print tabular results.
<<sql-jdbc>>::
A JDBC driver for {es}.
+<<sql-spec>>::
+ Overview of the {es-sql} language, such as supported data types, commands and
+ syntax.
<<sql-functions>>::
List of functions and operators supported.
-<<sql-spec>>::
- Overview of the {es-sql} language, such as data types, syntax and
- reserved keywords.
-
--
include::overview.asciidoc[]
include::getting-started.asciidoc[]
+include::concepts.asciidoc[]
+include::security.asciidoc[]
include::endpoints/index.asciidoc[]
-include::functions/index.asciidoc[]
include::language/index.asciidoc[]
+include::functions/index.asciidoc[]
+include::appendix/index.asciidoc[]
:jdbc-tests!:
diff --git a/docs/reference/sql/language/data-types.asciidoc b/docs/reference/sql/language/data-types.asciidoc
index 0ea152f639d61..7f98add97248b 100644
--- a/docs/reference/sql/language/data-types.asciidoc
+++ b/docs/reference/sql/language/data-types.asciidoc
@@ -1,7 +1,7 @@
[role="xpack"]
[testenv="basic"]
[[sql-data-types]]
-=== Data Types
+== Data Types
Most of {es} <<mapping-types,data types>> are available in {es-sql}, as indicated below:
@@ -44,7 +44,7 @@ uses the data type _particularities_ of the former over the latter as ultimately
[[sql-multi-field]]
[float]
-==== SQL and multi-fields
+=== SQL and multi-fields
A core concept in {es} is that of an `analyzed` field, that is a full-text value that is interpreted in order
to be effectively indexed. These fields are of type <<text,`text`>> and are not used for sorting or aggregations as their actual value depends on the <<analysis,`analyzer`>> used, hence why {es} also offers the <<keyword,`keyword`>> type for storing the _exact_
diff --git a/docs/reference/sql/language/index.asciidoc b/docs/reference/sql/language/index.asciidoc
index de8528242b07a..6558e9ad92bf8 100644
--- a/docs/reference/sql/language/index.asciidoc
+++ b/docs/reference/sql/language/index.asciidoc
@@ -3,9 +3,10 @@
[[sql-spec]]
== SQL Language
-This chapter describes the SQL syntax and data types supported in X-Pack.
-As a general rule, the syntax tries to adhere as much as possible to ANSI SQL to make the transition seamless.
+This chapter describes the SQL semantics supported in X-Pack, namely:
+
+<<sql-data-types>>:: Data types
+<<sql-commands>>:: Commands
include::data-types.asciidoc[]
-include::syntax.asciidoc[]
-include::reserved.asciidoc[]
+include::syntax/index.asciidoc[]
diff --git a/docs/reference/sql/language/syntax.asciidoc b/docs/reference/sql/language/syntax.asciidoc
deleted file mode 100644
index 2565c54166095..0000000000000
--- a/docs/reference/sql/language/syntax.asciidoc
+++ /dev/null
@@ -1,125 +0,0 @@
-[role="xpack"]
-[testenv="basic"]
-[[sql-spec-syntax]]
-=== SQL Statement Syntax
-
-// Big list of the entire syntax in SQL
-
-// Each entry might get its own file and code snippet
-
-["source","sql",subs="attributes,callouts,macros"]
---------------------------------------------------
-include-tagged::{sql-specs}/select.sql-spec[wildcardWithOrder]
---------------------------------------------------
-
-
-[[sql-spec-syntax-order-by]]
-==== `ORDER BY`
-
-Elasticsearch supports `ORDER BY` for consistent ordering. You add
-any field in the index that has <> or
-`SCORE()` to sort by `_score`. By default SQL sorts on what it
-considers to be the most efficient way to get the results.
-
-So sorting by a field looks like:
-
-[source,js]
---------------------------------------------------
-POST /_xpack/sql?format=txt
-{
- "query": "SELECT * FROM library ORDER BY page_count DESC LIMIT 5"
-}
---------------------------------------------------
-// CONSOLE
-// TEST[setup:library]
-
-which results in something like:
-
-[source,text]
---------------------------------------------------
- author | name | page_count | release_date
------------------+--------------------+---------------+------------------------
-Peter F. Hamilton|Pandora's Star |768 |2004-03-02T00:00:00.000Z
-Vernor Vinge |A Fire Upon the Deep|613 |1992-06-01T00:00:00.000Z
-Frank Herbert |Dune |604 |1965-06-01T00:00:00.000Z
-Alastair Reynolds|Revelation Space |585 |2000-03-15T00:00:00.000Z
-James S.A. Corey |Leviathan Wakes |561 |2011-06-02T00:00:00.000Z
---------------------------------------------------
-// TESTRESPONSE[s/\|/\\|/ s/\+/\\+/]
-// TESTRESPONSE[_cat]
-
-[[sql-spec-syntax-order-by-score]]
-For sorting by score to be meaningful you need to include a full
-text query in the `WHERE` clause. If you include multiple full
-text queries in the `WHERE` clause then their scores will be
-combined using the same rules as Elasticsearch's
-<<query-dsl-bool-query,bool query>>. Here is a simple example:
-
-[source,js]
---------------------------------------------------
-POST /_xpack/sql?format=txt
-{
- "query": "SELECT SCORE(), * FROM library WHERE match(name, 'dune') ORDER BY SCORE() DESC"
-}
---------------------------------------------------
-// CONSOLE
-// TEST[setup:library]
-
-Which results in something like:
-
-[source,text]
---------------------------------------------------
- SCORE() | author | name | page_count | release_date
----------------+---------------+-------------------+---------------+------------------------
-2.288635 |Frank Herbert |Dune |604 |1965-06-01T00:00:00.000Z
-1.8893257 |Frank Herbert |Dune Messiah |331 |1969-10-15T00:00:00.000Z
-1.6086555 |Frank Herbert |Children of Dune |408 |1976-04-21T00:00:00.000Z
-1.4005898 |Frank Herbert |God Emperor of Dune|454 |1981-05-28T00:00:00.000Z
---------------------------------------------------
-// TESTRESPONSE[s/\|/\\|/ s/\+/\\+/ s/\(/\\\(/ s/\)/\\\)/]
-// TESTRESPONSE[_cat]
-
-Note that you can return `SCORE()` by adding it to the where clause. This
-is possible even if you are not sorting by `SCORE()`:
-
-[source,js]
---------------------------------------------------
-POST /_xpack/sql?format=txt
-{
- "query": "SELECT SCORE(), * FROM library WHERE match(name, 'dune') ORDER BY page_count DESC"
-}
---------------------------------------------------
-// CONSOLE
-// TEST[setup:library]
-
-[source,text]
---------------------------------------------------
- SCORE() | author | name | page_count | release_date
----------------+---------------+-------------------+---------------+------------------------
-2.288635 |Frank Herbert |Dune |604 |1965-06-01T00:00:00.000Z
-1.4005898 |Frank Herbert |God Emperor of Dune|454 |1981-05-28T00:00:00.000Z
-1.6086555 |Frank Herbert |Children of Dune |408 |1976-04-21T00:00:00.000Z
-1.8893257 |Frank Herbert |Dune Messiah |331 |1969-10-15T00:00:00.000Z
---------------------------------------------------
-// TESTRESPONSE[s/\|/\\|/ s/\+/\\+/ s/\(/\\\(/ s/\)/\\\)/]
-// TESTRESPONSE[_cat]
-
-
-[[sql-spec-syntax-extract]]
-==== `EXTRACT`
-
-Elasticsearch supports `EXTRACT` to extract fields from datetimes.
-You can run any <<sql-functions-datetime,datetime function>>
-with `EXTRACT(<datetime_function> FROM <expression>)`. So
-
-["source","sql",subs="attributes,callouts,macros"]
---------------------------------------------------
-include-tagged::{sql-specs}/datetime.csv-spec[extractDayOfYear]
---------------------------------------------------
-
-is the equivalent to
-
-["source","sql",subs="attributes,callouts,macros"]
---------------------------------------------------
-include-tagged::{sql-specs}/datetime.csv-spec[dayOfYear]
---------------------------------------------------
diff --git a/docs/reference/sql/language/syntax/describe-table.asciidoc b/docs/reference/sql/language/syntax/describe-table.asciidoc
new file mode 100644
index 0000000000000..dd2d27a5781d2
--- /dev/null
+++ b/docs/reference/sql/language/syntax/describe-table.asciidoc
@@ -0,0 +1,22 @@
+[role="xpack"]
+[testenv="basic"]
+[[sql-syntax-describe-table]]
+=== DESCRIBE TABLE
+
+.Synopsis
+[source, sql]
+----
+DESCRIBE table
+----
+
+or
+
+[source, sql]
+----
+DESC table
+----
+
+
+.Description
+
+`DESC` and `DESCRIBE` are aliases to <<sql-syntax-show-columns,SHOW COLUMNS>>.
diff --git a/docs/reference/sql/language/syntax/index.asciidoc b/docs/reference/sql/language/syntax/index.asciidoc
new file mode 100644
index 0000000000000..4af8f19d7034b
--- /dev/null
+++ b/docs/reference/sql/language/syntax/index.asciidoc
@@ -0,0 +1,18 @@
+[role="xpack"]
+[testenv="basic"]
+[[sql-commands]]
+== SQL Commands
+
+This section contains the list of SQL commands supported by {es-sql} along with their syntax:
+
+<<sql-syntax-describe-table>>:: Describe a table.
+<<sql-syntax-select>>:: Retrieve rows from zero or more tables.
+<<sql-syntax-show-columns>>:: List columns in table.
+<<sql-syntax-show-functions>>:: List supported functions.
+<<sql-syntax-show-tables>>:: List tables available.
+
+include::describe-table.asciidoc[]
+include::select.asciidoc[]
+include::show-columns.asciidoc[]
+include::show-functions.asciidoc[]
+include::show-tables.asciidoc[]
diff --git a/docs/reference/sql/language/syntax/select.asciidoc b/docs/reference/sql/language/syntax/select.asciidoc
new file mode 100644
index 0000000000000..4a7c0534b68a3
--- /dev/null
+++ b/docs/reference/sql/language/syntax/select.asciidoc
@@ -0,0 +1,286 @@
+[role="xpack"]
+[testenv="basic"]
+[[sql-syntax-select]]
+=== SELECT
+
+.Synopsis
+[source, sql]
+----
+SELECT select_expr [, ...]
+[ FROM table_name ]
+[ WHERE condition ]
+[ GROUP BY grouping_element [, ...] ]
+[ HAVING condition ]
+[ ORDER BY expression [ ASC | DESC ] [, ...] ]
+[ LIMIT [ count ] ]
+----
+
+.Description
+
+Retrieves rows from zero or more tables.
+
+The general execution of `SELECT` is as follows:
+
+. All elements in the `FROM` list are computed (each element can be a base or alias table). Currently `FROM` supports exactly one table. Do note however that the table name can be a pattern (see <<sql-syntax-from,FROM Clause>> below).
+. If the `WHERE` clause is specified, all rows that do not satisfy the condition are eliminated from the output. (See <<sql-syntax-where,WHERE Clause>> below.)
+. If the `GROUP BY` clause is specified, or if there are aggregate function calls, the output is combined into groups of rows that match on one or more values, and the results of aggregate functions are computed. If the `HAVING` clause is present, it eliminates groups that do not satisfy the given condition. (See <<sql-syntax-group-by,GROUP BY Clause>> and <<sql-syntax-having,HAVING Clause>> below.)
+. The actual output rows are computed using the `SELECT` output expressions for each selected row or row group.
+. If the `ORDER BY` clause is specified, the returned rows are sorted in the specified order. If `ORDER BY` is not given, the rows are returned in whatever order the system finds fastest to produce. (See <<sql-syntax-order-by,ORDER BY Clause>> below.)
+. If the `LIMIT` is specified, the `SELECT` statement only returns a subset of the result rows. (See <<sql-syntax-limit,LIMIT Clause>> below.)
+
+
+[[sql-syntax-select-list]]
+==== `SELECT` List
+
+The `SELECT` list, namely the expressions between `SELECT` and `FROM`, represents the output rows of the `SELECT` statement.
+
+As with a table, every output column of a `SELECT` has a name which can either be specified per column through the `AS` keyword:
+
+[source,sql]
+----
+SELECT column AS c
+----
+
+assigned by {es-sql} if no name is given:
+
+[source,sql]
+----
+SELECT 1 + 1
+----
+
+or, if it is a simple column reference, taken from the column itself:
+
+[source,sql]
+----
+SELECT col FROM table
+----
+
+[[sql-syntax-select-wildcard]]
+==== Wildcard
+
+To select all the columns in the source, one can use `*`:
+
+["source","sql",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-specs}/select.sql-spec[wildcardWithOrder]
+--------------------------------------------------
+
+which essentially returns all columns found.
+
+[[sql-syntax-from]]
+[float]
+==== FROM Clause
+
+The `FROM` clause specifies one table for the `SELECT` and has the following syntax:
+
+[source, sql]
+----
+FROM table_name [ [ AS ] alias ]
+----
+
+where:
+
+`table_name`::
+
+Represents the name (optionally qualified) of an existing table, either a concrete or base one (an actual index) or an alias.
+If the table name contains special SQL characters (such as `.`, `-`, etc.) use double quotes to escape them:
+[source, sql]
+----
+SELECT ... FROM "some-table"
+----
+
+The name can be a <<multi-index,multi-index pattern>> pointing to multiple indices (likely requiring quoting as mentioned above) with the restriction that *all* resolved concrete tables have **exact mapping**.
+
+`alias`::
+A substitute name for the `FROM` item containing the alias. An alias is used for brevity or to eliminate ambiguity. When an alias is provided, it completely hides the actual name of the table and must be used in its place.
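+
+For example, a minimal sketch (the alias `lib` is hypothetical), assuming the `library` index used throughout these docs:
+
+[source, sql]
+----
+SELECT * FROM library AS lib
+----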
+
+[[sql-syntax-where]]
+[float]
+==== WHERE Clause
+
+The optional `WHERE` clause is used to filter rows from the query and has the following syntax:
+
+[source, sql]
+----
+WHERE condition
+----
+
+where:
+
+`condition`::
+
+Represents an expression that evaluates to a `boolean`. Only the rows that match the condition (to `true`) are returned.
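+
+For example, a minimal sketch filtering on the `page_count` column of the `library` index used throughout these docs:
+
+[source, sql]
+----
+SELECT * FROM library WHERE page_count > 500
+----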
+
+[[sql-syntax-group-by]]
+[float]
+==== GROUP BY
+
+The `GROUP BY` clause is used to divide the results into groups of rows on matching values from the designated columns. It has the following syntax:
+
+[source, sql]
+----
+GROUP BY grouping_element [, ...]
+----
+
+where:
+
+`grouping_element`::
+
+Represents an expression on which rows are grouped. It can be a column name, an alias or ordinal number of an output column, or an arbitrary expression of column values.
+
+When a `GROUP BY` clause is used in a `SELECT`, _all_ output expressions must be either aggregate functions or expressions used for grouping (or derivatives thereof), otherwise there would be more than one possible value to return for each ungrouped column.
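+
+For example, a minimal sketch grouping the `library` index used throughout these docs by its `author` column:
+
+[source, sql]
+----
+SELECT author, COUNT(*) FROM library GROUP BY author
+----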
+
+[[sql-syntax-having]]
+[float]
+==== HAVING
+
+The `HAVING` clause can be used _only_ along with aggregate functions (and thus `GROUP BY`) to filter which groups are kept or not and has the following syntax:
+
+[source, sql]
+----
+HAVING condition
+----
+
+where:
+
+`condition`::
+
+Represents an expression that evaluates to a `boolean`. Only groups that match the condition (to `true`) are returned.
+
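+For example, a minimal sketch, building on the grouping query from the previous section:
+
+[source, sql]
+----
+SELECT author, COUNT(*) FROM library GROUP BY author HAVING COUNT(*) > 1
+----
+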
+Both `WHERE` and `HAVING` are used for filtering however there are several differences between them:
+
+. `WHERE` works on individual *rows*, `HAVING` works on the *groups* created by ``GROUP BY``
+. `WHERE` is evaluated *before* grouping, `HAVING` is evaluated *after* grouping
+
+Note that it is possible to have a `HAVING` clause without a ``GROUP BY``. In this case, an __implicit grouping__ is applied, meaning all selected rows are considered to form a single group and `HAVING` can be applied on any of the aggregate functions specified on this group. +
+As such a query emits only a single row (as there is only a single group), the `HAVING` condition returns either one row (the group) or zero if the condition fails.
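+
+For example, a minimal sketch of such an implicitly grouped query over the `library` index used throughout these docs:
+
+[source, sql]
+----
+SELECT COUNT(*) FROM library HAVING COUNT(*) > 1
+----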
+
+[[sql-syntax-order-by]]
+[float]
+==== ORDER BY
+
+The `ORDER BY` clause is used to sort the results of `SELECT` by one or more expressions:
+
+[source, sql]
+----
+ORDER BY expression [ ASC | DESC ] [, ...]
+----
+
+where:
+
+`expression`::
+
+Represents an input column, an output column or an ordinal number of the position (starting from one) of an output column. Additionally, ordering can be done based on the result's _score_. +
+The direction, if not specified, is by default `ASC` (ascending). +
+Regardless of the ordering specified, null values are ordered last (at the end).
+
+IMPORTANT: When used alongside `GROUP BY`, the `ORDER BY` expression can point _only_ to the columns used for grouping.
+
+For example, the following query sorts by an arbitrary input field (`page_count`):
+
+[source,js]
+--------------------------------------------------
+POST /_xpack/sql?format=txt
+{
+ "query": "SELECT * FROM library ORDER BY page_count DESC LIMIT 5"
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:library]
+
+which results in something like:
+
+[source,text]
+--------------------------------------------------
+ author | name | page_count | release_date
+-----------------+--------------------+---------------+------------------------
+Peter F. Hamilton|Pandora's Star |768 |2004-03-02T00:00:00.000Z
+Vernor Vinge |A Fire Upon the Deep|613 |1992-06-01T00:00:00.000Z
+Frank Herbert |Dune |604 |1965-06-01T00:00:00.000Z
+Alastair Reynolds|Revelation Space |585 |2000-03-15T00:00:00.000Z
+James S.A. Corey |Leviathan Wakes |561 |2011-06-02T00:00:00.000Z
+--------------------------------------------------
+// TESTRESPONSE[s/\|/\\|/ s/\+/\\+/]
+// TESTRESPONSE[_cat]
+
+[[sql-syntax-order-by-score]]
+==== Order By Score
+
+When doing full-text queries in the `WHERE` clause, results can be returned based on their
+{defguide}/relevance-intro.html[score] or _relevance_ to the given query.
+
+NOTE: When doing multiple text queries in the `WHERE` clause, their scores will be
+combined using the same rules as {es}'s
+<<query-dsl-bool-query,bool query>>.
+
+To sort based on the `score`, use the special function `SCORE()`:
+
+[source,js]
+--------------------------------------------------
+POST /_xpack/sql?format=txt
+{
+ "query": "SELECT SCORE(), * FROM library WHERE match(name, 'dune') ORDER BY SCORE() DESC"
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:library]
+
+which results in something like:
+
+[source,text]
+--------------------------------------------------
+ SCORE() | author | name | page_count | release_date
+---------------+---------------+-------------------+---------------+------------------------
+2.288635 |Frank Herbert |Dune |604 |1965-06-01T00:00:00.000Z
+1.8893257 |Frank Herbert |Dune Messiah |331 |1969-10-15T00:00:00.000Z
+1.6086555 |Frank Herbert |Children of Dune |408 |1976-04-21T00:00:00.000Z
+1.4005898 |Frank Herbert |God Emperor of Dune|454 |1981-05-28T00:00:00.000Z
+--------------------------------------------------
+// TESTRESPONSE[s/\|/\\|/ s/\+/\\+/ s/\(/\\\(/ s/\)/\\\)/]
+// TESTRESPONSE[_cat]
+
+Note that you can return `SCORE()` in the `SELECT` list as long as there is a full-text
+predicate in the `WHERE` clause. This is possible even if you are not sorting by `SCORE()`:
+
+[source,js]
+--------------------------------------------------
+POST /_xpack/sql?format=txt
+{
+ "query": "SELECT SCORE(), * FROM library WHERE match(name, 'dune') ORDER BY page_count DESC"
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[setup:library]
+
+[source,text]
+--------------------------------------------------
+ SCORE() | author | name | page_count | release_date
+---------------+---------------+-------------------+---------------+------------------------
+2.288635 |Frank Herbert |Dune |604 |1965-06-01T00:00:00.000Z
+1.4005898 |Frank Herbert |God Emperor of Dune|454 |1981-05-28T00:00:00.000Z
+1.6086555 |Frank Herbert |Children of Dune |408 |1976-04-21T00:00:00.000Z
+1.8893257 |Frank Herbert |Dune Messiah |331 |1969-10-15T00:00:00.000Z
+--------------------------------------------------
+// TESTRESPONSE[s/\|/\\|/ s/\+/\\+/ s/\(/\\\(/ s/\)/\\\)/]
+// TESTRESPONSE[_cat]
+
+NOTE: Trying to return `SCORE()` from a non full-text query will return the same value for all results, as
+all are equally relevant.
+
+[[sql-syntax-limit]]
+[float]
+==== LIMIT
+
+The `LIMIT` clause restricts (limits) the number of rows returned using the format:
+
+[source, sql]
+----
+LIMIT ( count | ALL )
+----
+
+where
+
+count:: is a positive integer or zero indicating the maximum *possible* number of results being returned (as there might be fewer matches than the limit). If `0` is specified, no results are returned.
+
+ALL:: indicates there is no limit and thus all results are being returned.
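+
+For example, a minimal sketch returning at most two rows from the `library` index used throughout these docs:
+
+[source, sql]
+----
+SELECT name FROM library LIMIT 2
+----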
diff --git a/docs/reference/sql/language/syntax/show-columns.asciidoc b/docs/reference/sql/language/syntax/show-columns.asciidoc
new file mode 100644
index 0000000000000..a52c744f17a97
--- /dev/null
+++ b/docs/reference/sql/language/syntax/show-columns.asciidoc
@@ -0,0 +1,14 @@
+[role="xpack"]
+[testenv="basic"]
+[[sql-syntax-show-columns]]
+=== SHOW COLUMNS
+
+.Synopsis
+[source, sql]
+----
+SHOW COLUMNS [ FROM | IN ] ? table
+----
+
+.Description
+
+List the columns in a table and their data type (and other attributes).
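+
+For example, a minimal sketch against the `library` index used throughout these docs:
+
+[source, sql]
+----
+SHOW COLUMNS FROM library
+----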
diff --git a/docs/reference/sql/language/syntax/show-functions.asciidoc b/docs/reference/sql/language/syntax/show-functions.asciidoc
new file mode 100644
index 0000000000000..964cdf39081c6
--- /dev/null
+++ b/docs/reference/sql/language/syntax/show-functions.asciidoc
@@ -0,0 +1,16 @@
+[role="xpack"]
+[testenv="basic"]
+[[sql-syntax-show-functions]]
+=== SHOW FUNCTIONS
+
+.Synopsis
+[source, sql]
+----
+SHOW FUNCTIONS [ LIKE? pattern<1>? ]?
+----
+
+<1> SQL match pattern
+
+.Description
+
+List all the SQL functions and their type. The `LIKE` clause can be used to restrict the list of names to the given pattern.
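+
+For example, a minimal sketch (the pattern is hypothetical and uses the standard SQL `LIKE` wildcards):
+
+[source, sql]
+----
+SHOW FUNCTIONS LIKE '%COUNT%'
+----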
diff --git a/docs/reference/sql/language/syntax/show-tables.asciidoc b/docs/reference/sql/language/syntax/show-tables.asciidoc
new file mode 100644
index 0000000000000..7772c39c6fc21
--- /dev/null
+++ b/docs/reference/sql/language/syntax/show-tables.asciidoc
@@ -0,0 +1,16 @@
+[role="xpack"]
+[testenv="basic"]
+[[sql-syntax-show-tables]]
+=== SHOW TABLES
+
+.Synopsis
+[source, sql]
+----
+SHOW TABLES [ LIKE? pattern<1>? ]?
+----
+
+<1> SQL match pattern
+
+.Description
+
+List the tables available to the current user and their type. The `LIKE` clause can be used to restrict the list of names to the given pattern.
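+
+For example, a minimal sketch (the `lib%` pattern is hypothetical and uses the standard SQL `LIKE` wildcards):
+
+[source, sql]
+----
+SHOW TABLES LIKE 'lib%'
+----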
diff --git a/docs/reference/sql/overview.asciidoc b/docs/reference/sql/overview.asciidoc
index b4b93d92a13d4..a72f5ca61feb5 100644
--- a/docs/reference/sql/overview.asciidoc
+++ b/docs/reference/sql/overview.asciidoc
@@ -6,6 +6,7 @@
{es-sql} aims to provide a powerful yet lightweight SQL interface to {es}.
[[sql-introduction]]
+[float]
=== Introduction
{es-sql} is an X-Pack component that allows SQL-like queries to be executed in real-time against {es}.
@@ -14,6 +15,7 @@ _natively_ inside {es}.
One can think of {es-sql} as a _translator_, one that understands both SQL and {es} and makes it easy to read and process data in real-time, at scale by leveraging {es} capabilities.
[[sql-why]]
+[float]
=== Why {es-sql} ?
Native integration::
diff --git a/docs/reference/sql/security.asciidoc b/docs/reference/sql/security.asciidoc
new file mode 100644
index 0000000000000..64f554f023195
--- /dev/null
+++ b/docs/reference/sql/security.asciidoc
@@ -0,0 +1,39 @@
+[role="xpack"]
+[testenv="basic"]
+[[sql-security]]
+== Security
+
+{es-sql} integrates with security, if it is enabled on your cluster.
+In such a scenario, {es-sql} supports both security at the transport layer (by encrypting the communication between the consumer and the server) and authentication (for the access layer).
+
+[float]
+==== SSL/TLS configuration
+
+In case of an encrypted transport, the SSL/TLS support needs to be enabled in {es-sql} to properly establish communication with {es}. This is done by setting the `ssl` property to `true` or by using the `https` prefix in the URL. +
+Depending on your SSL configuration (whether the certificates are signed by a CA or not, whether they are global at JVM level or just local to one application), you might need to set up the `keystore` and/or `truststore`, that is, where the _credentials_ are stored (`keystore` - which typically stores private keys and certificates) and how to _verify_ them (`truststore` - which typically stores certificates from third parties, also known as CAs or certificate authorities). +
+Typically (and again, do note that your environment might differ significantly), if the SSL setup for {es-sql} is not already done at the JVM level, one needs to set up the `keystore` if the {es-sql} security requires client authentication (PKI - Public Key Infrastructure), and set up the `truststore` if SSL is enabled.
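+
+For example, one possible shape of a client connection string using these properties (the `jdbc:es://` prefix and the paths are assumptions for illustration; the property names are the ones described above):
+
+[source,text]
+----
+jdbc:es://https://server:9200/?ssl=true&ssl.truststore.location=/path/to/truststore.jks&ssl.truststore.pass=secret
+----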
+
+[float]
+==== Authentication
+
+The authentication support in {es-sql} is of two types:
+
+Username/Password:: Set these through `user` and `password` properties.
+PKI/X.509:: Use X.509 certificates to authenticate {es-sql} to {es}. For this, one would need to set up the `keystore` containing the private key and certificate for the appropriate user (configured in {es}) and the `truststore` with the CA certificate used to sign the SSL/TLS certificates in the {es} cluster. That is, one should set up the key to authenticate {es-sql} and also verify that it is the right one. To do so, one should set the `ssl.keystore.location` and `ssl.truststore.location` properties to indicate the `keystore` and `truststore` to use. It is recommended to have these secured through a password, in which case the `ssl.keystore.pass` and `ssl.truststore.pass` properties are required.
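+
+For example, a minimal sketch of the username/password flavor (the URL shape is an assumption for illustration; `user` and `password` are the properties named above):
+
+[source,text]
+----
+jdbc:es://http://server:9200/?user=es_admin&password=secret
+----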
+
+[float]
+[[sql-security-permissions]]
+==== Permissions (server-side)
+Lastly, on the server one needs to add a few permissions to
+users so they can run SQL. To run SQL a user needs `read` and
+`indices:admin/get` permissions at minimum, while some parts of
+the API require `cluster:monitor/main`.
+
+The following example configures a role that can run SQL via JDBC, querying the `test` and `bort`
+indices:
+
+["source","yaml",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{sql-tests}/security/roles.yml[cli_jdbc]
+--------------------------------------------------
+
diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ForEachProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ForEachProcessor.java
index 2a1046acb9cdb..1c64fdb7408ef 100644
--- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ForEachProcessor.java
+++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/ForEachProcessor.java
@@ -30,6 +30,7 @@
import java.util.Set;
import static org.elasticsearch.ingest.ConfigurationUtils.newConfigurationException;
+import static org.elasticsearch.ingest.ConfigurationUtils.readBooleanProperty;
import static org.elasticsearch.ingest.ConfigurationUtils.readMap;
import static org.elasticsearch.ingest.ConfigurationUtils.readStringProperty;
@@ -47,16 +48,28 @@ public final class ForEachProcessor extends AbstractProcessor {
private final String field;
private final Processor processor;
+ private final boolean ignoreMissing;
- ForEachProcessor(String tag, String field, Processor processor) {
+ ForEachProcessor(String tag, String field, Processor processor, boolean ignoreMissing) {
super(tag);
this.field = field;
this.processor = processor;
+ this.ignoreMissing = ignoreMissing;
+ }
+
+ boolean isIgnoreMissing() {
+ return ignoreMissing;
}
@Override
public void execute(IngestDocument ingestDocument) throws Exception {
-        List values = ingestDocument.getFieldValue(field, List.class);
+        List<?> values = ingestDocument.getFieldValue(field, List.class, ignoreMissing);
+        if (values == null) {
+            // null means the field is missing or holds a null value;
+            // skip silently only when ignoreMissing is set
+            if (ignoreMissing) {
+                return;
+            }
+            throw new IllegalArgumentException("field [" + field + "] is null, cannot loop over its elements.");
+ }
List