diff --git a/.ci/bwcVersions b/.ci/bwcVersions index ec0614ed2f549..9277aad1c73c9 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -38,7 +38,7 @@ BWC_VERSION: - "7.13.2" - "7.13.3" - "7.13.4" - - "7.13.5" - "7.14.0" + - "7.14.1" - "7.15.0" - "8.0.0" diff --git a/client/rest-high-level/qa/ssl-enabled/src/javaRestTest/java/org/elasticsearch/client/EnrollmentIT.java b/client/rest-high-level/qa/ssl-enabled/src/javaRestTest/java/org/elasticsearch/client/EnrollmentIT.java index d3942670920a2..5266db022bd4a 100644 --- a/client/rest-high-level/qa/ssl-enabled/src/javaRestTest/java/org/elasticsearch/client/EnrollmentIT.java +++ b/client/rest-high-level/qa/ssl-enabled/src/javaRestTest/java/org/elasticsearch/client/EnrollmentIT.java @@ -64,7 +64,7 @@ public void testEnrollNode() throws Exception { assertThat(nodeEnrollmentResponse, notNullValue()); assertThat(nodeEnrollmentResponse.getHttpCaKey(), endsWith("K2S3vidA=")); assertThat(nodeEnrollmentResponse.getHttpCaCert(), endsWith("LfkRjirc=")); - assertThat(nodeEnrollmentResponse.getTransportKey(), endsWith("1I-r8vOQ==")); + assertThat(nodeEnrollmentResponse.getTransportKey(), endsWith("1I+r8vOQ==")); assertThat(nodeEnrollmentResponse.getTransportCert(), endsWith("OpTdtgJo=")); List nodesAddresses = nodeEnrollmentResponse.getNodesAddresses(); assertThat(nodesAddresses.size(), equalTo(2)); @@ -75,7 +75,7 @@ public void testEnrollKibana() throws Exception { execute(highLevelClient().security()::enrollKibana, highLevelClient().security()::enrollKibanaAsync, RequestOptions.DEFAULT); assertThat(kibanaResponse, notNullValue()); assertThat(kibanaResponse.getHttpCa() - , endsWith("brcNC5xq6YE7C4_06nH7F6le4kE4Uo6c9fpkl4ehOxQxndNLn462tFF-8VBA8IftJ1PPWzqGxLsCTzM6p6w8sa-XhgNYglLfkRjirc=")); + , endsWith("brcNC5xq6YE7C4/06nH7F6le4kE4Uo6c9fpkl4ehOxQxndNLn462tFF+8VBA8IftJ1PPWzqGxLsCTzM6p6w8sa+XhgNYglLfkRjirc=")); assertNotNull(kibanaResponse.getPassword()); assertThat(kibanaResponse.getPassword().toString().length(), equalTo(14)); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchableSnapshotsIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchableSnapshotsIT.java index 41581cba0e97c..507308ca24160 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchableSnapshotsIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchableSnapshotsIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.client; +import org.apache.http.client.methods.HttpGet; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; @@ -35,7 +36,10 @@ import java.io.IOException; import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import static org.elasticsearch.common.xcontent.support.XContentMapValues.extractValue; import static org.hamcrest.Matchers.aMapWithSize; import static org.hamcrest.Matchers.emptyOrNullString; import static org.hamcrest.Matchers.equalTo; @@ -125,6 +129,35 @@ public void testCacheStats() throws Exception { assertThat(response.getHits().getHits()[0].getSourceAsMap(), aMapWithSize(2)); } + { + assertBusy(() -> { + final Response response = client().performRequest(new Request(HttpGet.METHOD_NAME, "/_nodes/stats/thread_pool")); + assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus())); + + 
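+                    // The stats response maps node IDs to per-node objects; for each node, drill into
+                    // thread_pool.searchable_snapshots_cache_fetch_async and verify the pool has gone idle.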
@SuppressWarnings("unchecked") + final Map nodes = (Map) extractValue(responseAsMap(response), "nodes"); + assertThat(nodes, notNullValue()); + + for (String node : nodes.keySet()) { + @SuppressWarnings("unchecked") + final Map threadPools = + (Map) extractValue((Map) nodes.get(node), "thread_pool"); + assertNotNull("No thread pools on node " + node, threadPools); + + @SuppressWarnings("unchecked") + final Map threadPoolStats = + (Map) threadPools.get("searchable_snapshots_cache_fetch_async"); + assertNotNull("No thread pools stats on node " + node, threadPoolStats); + + final Number active = (Number) extractValue(threadPoolStats, "active"); + assertThat(node + " has still active tasks", active, equalTo(0)); + + final Number queue = (Number) extractValue(threadPoolStats, "queue"); + assertThat(node + " has still enqueued tasks", queue, equalTo(0)); + } + }, 30L, TimeUnit.SECONDS); + } + { final CachesStatsRequest request = new CachesStatsRequest(); final CachesStatsResponse response = execute(request, client::cacheStats, client::cacheStatsAsync); diff --git a/docs/reference/aggregations/bucket/geodistance-aggregation.asciidoc b/docs/reference/aggregations/bucket/geodistance-aggregation.asciidoc index 1ee9f4f45281d..242c2aec92d72 100644 --- a/docs/reference/aggregations/bucket/geodistance-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/geodistance-aggregation.asciidoc @@ -88,7 +88,7 @@ The specified field must be of type `geo_point` (which can only be set explicitl * Object format: `{ "lat" : 52.3760, "lon" : 4.894 }` - this is the safest format as it is the most explicit about the `lat` & `lon` values * String format: `"52.3760, 4.894"` - where the first number is the `lat` and the second is the `lon` -* Array format: `[4.894, 52.3760]` - which is based on the `GeoJson` standard and where the first number is the `lon` and the second one is the `lat` +* Array format: `[4.894, 52.3760]` - which is based on the GeoJSON standard where the first number is the `lon` and the second one is the `lat` By default, the distance unit is `m` (meters) but it can also accept: `mi` (miles), `in` (inches), `yd` (yards), `km` (kilometers), `cm` (centimeters), `mm` (millimeters). diff --git a/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc b/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc index 2658ab6683d3c..2a192fe33ed0a 100644 --- a/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc @@ -153,7 +153,7 @@ The response for the above aggregation: [[geocentroid-aggregation-geo-shape]] ==== Geo Centroid Aggregation on `geo_shape` fields -The centroid metric for geo-shapes is more nuanced than for points. The centroid of a specific aggregation bucket +The centroid metric for geoshapes is more nuanced than for points. The centroid of a specific aggregation bucket containing shapes is the centroid of the highest-dimensionality shape type in the bucket. For example, if a bucket contains shapes comprising of polygons and lines, then the lines do not contribute to the centroid metric. Each type of shape's centroid is calculated differently. Envelopes and circles ingested via the <> are treated @@ -233,12 +233,12 @@ POST /places/_search?size=0 .Using `geo_centroid` as a sub-aggregation of `geohash_grid` ==== The <> -aggregation places documents, not individual geo-points, into buckets. If a +aggregation places documents, not individual geopoints, into buckets. 
If a document's `geo_point` field contains <>, the document -could be assigned to multiple buckets, even if one or more of its geo-points are +could be assigned to multiple buckets, even if one or more of its geopoints are outside the bucket boundaries. If a `geocentroid` sub-aggregation is also used, each centroid is calculated -using all geo-points in a bucket, including those outside the bucket boundaries. +using all geopoints in a bucket, including those outside the bucket boundaries. This can result in centroids outside of bucket boundaries. ==== diff --git a/docs/reference/indices/index-mgmt.asciidoc b/docs/reference/indices/index-mgmt.asciidoc index 0cda6dbe7ab09..0543da9b2a562 100644 --- a/docs/reference/indices/index-mgmt.asciidoc +++ b/docs/reference/indices/index-mgmt.asciidoc @@ -144,7 +144,7 @@ image::images/index-mgmt/management_index_component_template.png[Component templ section blank. . Define a mapping that contains an <> field named `geo` with a -child <> field named `coordinates`: +child <> field named `coordinates`: + [role="screenshot"] image::images/index-mgmt/management-index-templates-mappings.png[Mapped fields page] diff --git a/docs/reference/ingest/geo-match-enrich-policy-type-ex.asciidoc b/docs/reference/ingest/geo-match-enrich-policy-type-ex.asciidoc index ff79cf890df5e..9432c9c06dec8 100644 --- a/docs/reference/ingest/geo-match-enrich-policy-type-ex.asciidoc +++ b/docs/reference/ingest/geo-match-enrich-policy-type-ex.asciidoc @@ -82,13 +82,13 @@ pipeline. In the pipeline, add an <> that includes: * Your enrich policy. -* The `field` of incoming documents used to match the geo_shape of documents +* The `field` of incoming documents used to match the geoshape of documents from the enrich index. * The `target_field` used to store appended enrich data for incoming documents. This field contains the `match_field` and `enrich_fields` specified in your enrich policy. -* The `shape_relation`, which indicates how the processor matches geo_shapes in - incoming documents to geo_shapes in documents from the enrich index. See +* The `shape_relation`, which indicates how the processor matches geoshapes in + incoming documents to geoshapes in documents from the enrich index. See <<_spatial_relations>> for valid options and more information. [source,console] diff --git a/docs/reference/ingest/processors/enrich.asciidoc b/docs/reference/ingest/processors/enrich.asciidoc index 431c089744aad..85c4e25d8d0be 100644 --- a/docs/reference/ingest/processors/enrich.asciidoc +++ b/docs/reference/ingest/processors/enrich.asciidoc @@ -20,7 +20,7 @@ See <> section for more information about how | `ignore_missing` | no | false | If `true` and `field` does not exist, the processor quietly exits without modifying the document | `override` | no | true | If processor will update fields with pre-existing non-null-valued field. When set to `false`, such fields will not be touched. | `max_matches` | no | 1 | The maximum number of matched documents to include under the configured target field. The `target_field` will be turned into a json array if `max_matches` is higher than 1, otherwise `target_field` will become a json object. In order to avoid documents getting too large, the maximum allowed value is 128. -| `shape_relation` | no | `INTERSECTS` | A spatial relation operator used to match the <> of incoming documents to documents in the enrich index. This option is only used for `geo_match` enrich policy types. See <<_spatial_relations>> for operators and more information. 
+| `shape_relation` | no | `INTERSECTS` | A spatial relation operator used to match the <> of incoming documents to documents in the enrich index. This option is only used for `geo_match` enrich policy types. See <<_spatial_relations>> for operators and more information. include::common-options.asciidoc[] |====== diff --git a/docs/reference/mapping/types/geo-point.asciidoc b/docs/reference/mapping/types/geo-point.asciidoc index 03d9c731d93b8..dd878aa595170 100644 --- a/docs/reference/mapping/types/geo-point.asciidoc +++ b/docs/reference/mapping/types/geo-point.asciidoc @@ -1,20 +1,20 @@ [[geo-point]] -=== Geo-point field type +=== Geopoint field type ++++ -Geo-point +Geopoint ++++ Fields of type `geo_point` accept latitude-longitude pairs, which can be used: -* to find geo-points within a <>, +* to find geopoints within a <>, within a certain <> of a central point, - or within a <> or within a <>. + or within a <> or within a <>. * to aggregate documents <> or by <> from a central point. * to integrate distance into a document's <>. * to <> documents by distance. -There are five ways that a geo-point may be specified, as demonstrated below: +There are five ways that a geopoint may be specified, as demonstrated below: [source,console] -------------------------------------------------- @@ -31,7 +31,7 @@ PUT my-index-000001 PUT my-index-000001/_doc/1 { - "text": "Geo-point as an object", + "text": "Geopoint as an object", "location": { <1> "lat": 41.12, "lon": -71.34 @@ -40,25 +40,25 @@ PUT my-index-000001/_doc/1 PUT my-index-000001/_doc/2 { - "text": "Geo-point as a string", + "text": "Geopoint as a string", "location": "41.12,-71.34" <2> } PUT my-index-000001/_doc/3 { - "text": "Geo-point as a geohash", + "text": "Geopoint as a geohash", "location": "drm3btev3e86" <3> } PUT my-index-000001/_doc/4 { - "text": "Geo-point as an array", + "text": "Geopoint as an array", "location": [ -71.34, 41.12 ] <4> } PUT my-index-000001/_doc/5 { - "text": "Geo-point as a WKT POINT primitive", + "text": "Geopoint as a WKT POINT primitive", "location" : "POINT (-71.34 41.12)" <5> } @@ -81,20 +81,20 @@ GET my-index-000001/_search } -------------------------------------------------- -<1> Geo-point expressed as an object, with `lat` and `lon` keys. -<2> Geo-point expressed as a string with the format: `"lat,lon"`. -<3> Geo-point expressed as a geohash. -<4> Geo-point expressed as an array with the format: [ `lon`, `lat`] -<5> Geo-point expressed as a https://docs.opengeospatial.org/is/12-063r5/12-063r5.html[Well-Known Text] +<1> Geopoint expressed as an object, with `lat` and `lon` keys. +<2> Geopoint expressed as a string with the format: `"lat,lon"`. +<3> Geopoint expressed as a geohash. +<4> Geopoint expressed as an array with the format: [ `lon`, `lat`] +<5> Geopoint expressed as a https://docs.opengeospatial.org/is/12-063r5/12-063r5.html[Well-Known Text] POINT with the format: `"POINT(lon lat)"` -<6> A geo-bounding box query which finds all geo-points that fall inside the box. +<6> A geo-bounding box query which finds all geopoints that fall inside the box. [IMPORTANT] -.Geo-points expressed as an array or string +.Geopoints expressed as an array or string ================================================== -Please note that string geo-points are ordered as `lat,lon`, while array -geo-points are ordered as the reverse: `lon,lat`. +Please note that string geopoints are ordered as `lat,lon`, while array +geopoints are ordered as the reverse: `lon,lat`. 
Originally, `lat,lon` was used for both array and string, but the array format was changed early on to conform to the format used by GeoJSON. @@ -121,9 +121,9 @@ The following parameters are accepted by `geo_point` fields: <>:: - If `true`, malformed geo-points are ignored. If `false` (default), - malformed geo-points throw an exception and reject the whole document. - A geo-point is considered malformed if its latitude is outside the range + If `true`, malformed geopoints are ignored. If `false` (default), + malformed geopoints throw an exception and reject the whole document. + A geopoint is considered malformed if its latitude is outside the range -90 <= latitude <= 90, or if its longitude is outside the range -180 <= longitude <= 180. Note that this cannot be set if the `script` parameter is used. @@ -131,10 +131,14 @@ The following parameters are accepted by `geo_point` fields: If `true` (default) three dimension points will be accepted (stored in source) but only latitude and longitude values will be indexed; the third dimension is - ignored. If `false`, geo-points containing any more than latitude and longitude + ignored. If `false`, geopoints containing any more than latitude and longitude (two dimensions) values throw an exception and reject the whole document. Note that this cannot be set if the `script` parameter is used. +<>:: + + Should the field be searchable? Accepts `true` (default) and `false`. + <>:: Accepts an geopoint value which is substituted for any explicit `null` values. @@ -161,9 +165,9 @@ The following parameters are accepted by `geo_point` fields: <>, and should emit points as a pair of (lat, lon) double values. -==== Using geo-points in scripts +==== Using geopoints in scripts -When accessing the value of a geo-point in a script, the value is returned as +When accessing the value of a geopoint in a script, the value is returned as a `GeoPoint` object, which allows access to the `.lat` and `.lon` values respectively: diff --git a/docs/reference/mapping/types/geo-shape.asciidoc b/docs/reference/mapping/types/geo-shape.asciidoc index 7365ae074006d..89fc7dc20f527 100644 --- a/docs/reference/mapping/types/geo-shape.asciidoc +++ b/docs/reference/mapping/types/geo-shape.asciidoc @@ -1,7 +1,7 @@ [[geo-shape]] -=== Geo-shape field type +=== Geoshape field type ++++ -Geo-shape +Geoshape ++++ The `geo_shape` data type facilitates the indexing of and searching @@ -10,14 +10,14 @@ used when either the data being indexed or the queries being executed contain shapes other than just points. You can query documents using this type using -<>. +a <>. [[geo-shape-mapping-options]] [discrete] ==== Mapping Options -The geo_shape mapping maps geo_json geometry objects to the geo_shape -type. To enable it, users must explicitly map fields to the geo_shape +The `geo_shape` mapping maps GeoJSON geometry objects to the `geo_shape` +type. To enable it, users must explicitly map fields to the `geo_shape` type. [cols="<,<,<",options="header",] @@ -58,7 +58,7 @@ entire document. |`ignore_z_value` |If `true` (default) three dimension points will be accepted (stored in source) but only latitude and longitude values will be indexed; the third dimension is ignored. If `false`, -geo-points containing any more than latitude and longitude (two dimensions) values throw an exception +geopoints containing any more than latitude and longitude (two dimensions) values throw an exception and reject the whole document. 
| `true` @@ -291,8 +291,8 @@ POST /example/_doc -------------------------------------------------- // TEST[catch:/mapper_parsing_exception/] -An `orientation` parameter can be defined when setting the geo_shape mapping (see <>). This will define vertex -order for the coordinate list on the mapped geo_shape field. It can also be overridden on each document. The following is an example for +An `orientation` parameter can be defined when setting the `geo_shape` mapping (see <>). This will define vertex +order for the coordinate list on the mapped `geo_shape` field. It can also be overridden on each document. The following is an example for overriding the orientation on a document: [source,console] @@ -313,7 +313,7 @@ POST /example/_doc [[geo-multipoint]] ===== http://geojson.org/geojson-spec.html#id5[MultiPoint] -The following is an example of a list of geojson points: +The following is an example of a list of GeoJSON points: [source,console] -------------------------------------------------- @@ -342,7 +342,7 @@ POST /example/_doc [[geo-multilinestring]] ===== http://geojson.org/geojson-spec.html#id6[MultiLineString] -The following is an example of a list of geojson linestrings: +The following is an example of a list of GeoJSON linestrings: [source,console] -------------------------------------------------- @@ -373,7 +373,7 @@ POST /example/_doc [[geo-multipolygon]] ===== http://geojson.org/geojson-spec.html#id7[MultiPolygon] -The following is an example of a list of geojson polygons (second polygon contains a hole): +The following is an example of a list of GeoJSON polygons (second polygon contains a hole): [source,console] -------------------------------------------------- @@ -404,7 +404,7 @@ POST /example/_doc [[geo-geometry_collection]] ===== http://geojson.org/geojson-spec.html#geometrycollection[Geometry Collection] -The following is an example of a collection of geojson geometry objects: +The following is an example of a collection of GeoJSON geometry objects: [source,console] -------------------------------------------------- @@ -479,5 +479,5 @@ a <>. Due to the complex input structure and index representation of shapes, it is not currently possible to sort shapes or retrieve their fields -directly. The geo_shape value is only retrievable through the `_source` +directly. The `geo_shape` value is only retrievable through the `_source` field. diff --git a/docs/reference/mapping/types/shape.asciidoc b/docs/reference/mapping/types/shape.asciidoc index a2236ffc8b38f..0251c11e0d3d5 100644 --- a/docs/reference/mapping/types/shape.asciidoc +++ b/docs/reference/mapping/types/shape.asciidoc @@ -18,7 +18,7 @@ You can query documents using this type using [discrete] ==== Mapping Options -Like the <> field type, the `shape` field mapping maps +Like the <> field type, the `shape` field mapping maps http://geojson.org[GeoJSON] or https://docs.opengeospatial.org/is/12-063r5/12-063r5.html[Well-Known Text] (WKT) geometry objects to the shape type. To enable it, users must explicitly map fields to the shape type. @@ -34,8 +34,8 @@ different ways. 1. Right-hand rule: `right`, `ccw`, `counterclockwise`, 2. Left-hand rule: `left`, `cw`, `clockwise`. The default orientation (`counterclockwise`) complies with the OGC standard which defines outer ring vertices in counterclockwise order with inner ring(s) vertices (holes) -in clockwise order. Setting this parameter in the geo_shape mapping explicitly -sets vertex order for the coordinate list of a geo_shape field but can be +in clockwise order. 
Setting this parameter in the `geo_shape` mapping explicitly +sets vertex order for the coordinate list of a `geo_shape` field but can be overridden in each individual GeoJSON or WKT document. | `ccw` @@ -46,7 +46,7 @@ entire document. |`ignore_z_value` |If `true` (default) three dimension points will be accepted (stored in source) but only latitude and longitude values will be indexed; the third dimension is ignored. If `false`, -geo-points containing any more than latitude and longitude (two dimensions) values throw an exception +geopoints containing any more than latitude and longitude (two dimensions) values throw an exception and reject the whole document. | `true` @@ -279,7 +279,7 @@ POST /example/_doc [[multipoint]] ===== http://geojson.org/geojson-spec.html#id5[MultiPoint] -The following is an example of a list of geojson points: +The following is an example of a list of GeoJSON points: [source,console] -------------------------------------------------- @@ -308,7 +308,7 @@ POST /example/_doc [[multilinestring]] ===== http://geojson.org/geojson-spec.html#id6[MultiLineString] -The following is an example of a list of geojson linestrings: +The following is an example of a list of GeoJSON linestrings: [source,console] -------------------------------------------------- @@ -339,7 +339,7 @@ POST /example/_doc [[multipolygon]] ===== http://geojson.org/geojson-spec.html#id7[MultiPolygon] -The following is an example of a list of geojson polygons (second polygon contains a hole): +The following is an example of a list of GeoJSON polygons (second polygon contains a hole): [source,console] -------------------------------------------------- @@ -370,7 +370,7 @@ POST /example/_doc [[geometry_collection]] ===== http://geojson.org/geojson-spec.html#geometrycollection[Geometry Collection] -The following is an example of a collection of geojson geometry objects: +The following is an example of a collection of GeoJSON geometry objects: [source,console] -------------------------------------------------- diff --git a/docs/reference/ml/anomaly-detection/functions/ml-geo-functions.asciidoc b/docs/reference/ml/anomaly-detection/functions/ml-geo-functions.asciidoc index a9fd9858d6b9d..2dba8a32f75b3 100644 --- a/docs/reference/ml/anomaly-detection/functions/ml-geo-functions.asciidoc +++ b/docs/reference/ml/anomaly-detection/functions/ml-geo-functions.asciidoc @@ -70,7 +70,7 @@ For example, JSON data might contain the following transaction coordinates: // NOTCONSOLE In {es}, location data is likely to be stored in `geo_point` fields. For more -information, see {ref}/geo-point.html[Geo-point data type]. This data type is +information, see {ref}/geo-point.html[`geo_point` data type]. This data type is supported natively in {ml-features}. Specifically, {dfeed} when pulling data from a `geo_point` field, will transform the data into the appropriate `lat,lon` string format before sending to the {anomaly-job}. 
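For example (an illustrative sketch; the `transactions` index and its field names are hypothetical), a `geo_point` indexed as an object reaches the {anomaly-job} as a single `lat,lon` string:

[source,console]
--------------------------------------------------
PUT transactions/_doc/1
{
  "time": 1595750788,
  "transaction_coordinates": {
    "lat": 41.44,
    "lon": 90.5
  }
}
--------------------------------------------------
// NOTCONSOLE

Here the {dfeed} would hand a `lat_long` detector the value `"41.44,90.5"` rather than the original object.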
diff --git a/docs/reference/ml/anomaly-detection/ml-configuring-transform.asciidoc b/docs/reference/ml/anomaly-detection/ml-configuring-transform.asciidoc index 9808b98cf1d16..9d02c3d011eac 100644 --- a/docs/reference/ml/anomaly-detection/ml-configuring-transform.asciidoc +++ b/docs/reference/ml/anomaly-detection/ml-configuring-transform.asciidoc @@ -421,7 +421,7 @@ The preview {dfeed} API returns the following results, which show that [[ml-configuring-transform8]] -.Example 8: Transforming geo_point data +.Example 8: Transforming geopoint data [source,console] -------------------------------------------------- diff --git a/docs/reference/query-dsl/function-score-query.asciidoc b/docs/reference/query-dsl/function-score-query.asciidoc index e377bb74b0149..5d898f6925aa6 100644 --- a/docs/reference/query-dsl/function-score-query.asciidoc +++ b/docs/reference/query-dsl/function-score-query.asciidoc @@ -368,7 +368,7 @@ decay function is specified as -------------------------------------------------- // NOTCONSOLE <1> The `DECAY_FUNCTION` should be one of `linear`, `exp`, or `gauss`. -<2> The specified field must be a numeric, date, or geo-point field. +<2> The specified field must be a numeric, date, or geopoint field. In the above example, the field is a <> and origin can be provided in geo format. `scale` and `offset` must be given with a unit in @@ -656,7 +656,7 @@ image::https://f.cloud.github.com/assets/4320215/768165/19d8b1aa-e899-11e2-91bc- ==== Supported fields for decay functions -Only numeric, date, and geo-point fields are supported. +Only numeric, date, and geopoint fields are supported. ==== What if a field is missing? diff --git a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc index 664edaaafe18c..ec7e103ce0d49 100644 --- a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc +++ b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc @@ -175,7 +175,7 @@ accept geo points with invalid latitude or longitude, set to [discrete] ==== Accepted Formats -In much the same way the geo_point type can accept different +In much the same way the `geo_point` type can accept different representations of the geo point, the filter can accept it as well: [discrete] diff --git a/docs/reference/query-dsl/geo-polygon-query.asciidoc b/docs/reference/query-dsl/geo-polygon-query.asciidoc index dbe9adb0e2026..723e91e3bc6fb 100644 --- a/docs/reference/query-dsl/geo-polygon-query.asciidoc +++ b/docs/reference/query-dsl/geo-polygon-query.asciidoc @@ -145,7 +145,7 @@ GET /_search // TEST[warning:Deprecated field [geo_polygon] used, replaced by [[geo_shape] query where polygons are defined in geojson or wkt]] [discrete] -==== geo_point Type +==== `geo_point` type The query *requires* the <> type to be set on the relevant field. diff --git a/docs/reference/query-dsl/geo-queries.asciidoc b/docs/reference/query-dsl/geo-queries.asciidoc index b4eb86763e702..363c0ef8a0fad 100644 --- a/docs/reference/query-dsl/geo-queries.asciidoc +++ b/docs/reference/query-dsl/geo-queries.asciidoc @@ -9,20 +9,19 @@ lines, circles, polygons, multi-polygons, etc. The queries in this group are: <> query:: -Finds documents with geo-points that fall into the specified rectangle. +Finds documents with geopoints that fall into the specified rectangle. <> query:: -Finds documents with geo-points within the specified distance of a central point. +Finds documents with geopoints within the specified distance of a central point. 
<> query:: -Find documents with geo-points within the specified polygon. +Find documents with geopoints within the specified polygon. <> query:: Finds documents with: -* `geo-shapes` which either intersect, are contained by, or do not intersect -with the specified geo-shape -* `geo-points` which intersect the specified -geo-shape +* Geoshapes which either intersect, are contained by, or do not intersect +with the specified geoshape +* Geopoints which intersect the specified geoshape include::geo-bounding-box-query.asciidoc[] diff --git a/docs/reference/query-dsl/geo-shape-query.asciidoc b/docs/reference/query-dsl/geo-shape-query.asciidoc index da0f42327188a..a1e052f98eb4e 100644 --- a/docs/reference/query-dsl/geo-shape-query.asciidoc +++ b/docs/reference/query-dsl/geo-shape-query.asciidoc @@ -1,13 +1,13 @@ [[query-dsl-geo-shape-query]] -=== Geo-shape query +=== Geoshape query ++++ -Geo-shape +Geoshape ++++ Filter documents indexed using the `geo_shape` or `geo_point` type. -Requires the <> or the -<>. +Requires the <> or the +<>. The `geo_shape` query uses the same grid square representation as the `geo_shape` mapping to find documents that have a shape that intersects diff --git a/docs/reference/query-dsl/special-queries.asciidoc b/docs/reference/query-dsl/special-queries.asciidoc index cad9b28cbfdbc..ca3afd94092d7 100644 --- a/docs/reference/query-dsl/special-queries.asciidoc +++ b/docs/reference/query-dsl/special-queries.asciidoc @@ -6,7 +6,7 @@ This group contains queries which do not fit into the other groups: <>:: A query that computes scores based on the dynamically computed distances -between the origin and documents' date, date_nanos and geo_point fields. +between the origin and documents' `date`, `date_nanos`, and `geo_point` fields. It is able to efficiently skip non-competitive hits. <>:: diff --git a/docs/reference/redirects.asciidoc b/docs/reference/redirects.asciidoc index e1797c6565115..5e253d39e1ef1 100644 --- a/docs/reference/redirects.asciidoc +++ b/docs/reference/redirects.asciidoc @@ -826,7 +826,7 @@ See <> and <>. [role="exclude",id="indices-component-templates"] === Component template APIs -coming::[7.x] +See <>. [role="exclude",id="modules-indices"] === Indices module diff --git a/docs/reference/release-notes/highlights.asciidoc b/docs/reference/release-notes/highlights.asciidoc index 4b2afd2b6ded6..7a5f4fefcda1d 100644 --- a/docs/reference/release-notes/highlights.asciidoc +++ b/docs/reference/release-notes/highlights.asciidoc @@ -1,8 +1,6 @@ [[release-highlights]] == What's new in {minor-version} -coming::[{minor-version}] - Here are the highlights of what's new and improved in {es} {minor-version}! For detailed information about this release, see the <> and diff --git a/docs/reference/scripting/expression.asciidoc b/docs/reference/scripting/expression.asciidoc index 61301fa873b40..84c07ebecaf31 100644 --- a/docs/reference/scripting/expression.asciidoc +++ b/docs/reference/scripting/expression.asciidoc @@ -137,5 +137,5 @@ e.g. based on geolocation of the user. 
There are a few limitations relative to other script languages: -* Only numeric, boolean, date, and geo_point fields may be accessed +* Only numeric, `boolean`, `date`, and `geo_point` fields may be accessed * Stored fields are not available diff --git a/rest-api-spec/build.gradle b/rest-api-spec/build.gradle index 85d4adddae9d2..2e80aa2b16218 100644 --- a/rest-api-spec/build.gradle +++ b/rest-api-spec/build.gradle @@ -80,7 +80,10 @@ def v7compatibilityNotSupportedTests = { 'field_caps/30_filter/Field caps with index filter', //behaviour change after #63692 4digits dates are parsed as epoch and in quotes as year - 'indices.forcemerge/10_basic/Check deprecation warning when incompatible only_expunge_deletes and max_num_segments values are both set', //#44761 bug fix + 'indices.forcemerge/10_basic/Check deprecation warning when incompatible only_expunge_deletes and max_num_segments values are both set', //#44761 bug fix, + + 'search/340_type_query/type query' //#47207 type query throws exception in compatible mode + ] } tasks.named("yamlRestCompatTest").configure { @@ -91,7 +94,6 @@ tasks.named("yamlRestCompatTest").configure { systemProperty 'tests.rest.blacklist', ([ 'search.aggregation/200_top_hits_metric/top_hits aggregation with sequence numbers', 'search/310_match_bool_prefix/multi_match multiple fields with cutoff_frequency throws exception', //cutoff_frequency - 'search/340_type_query/type query', // type_query - probably should behave like match_all ] + v7compatibilityNotSupportedTests()) .join(',') diff --git a/rest-api-spec/src/yamlRestCompatTest/resources/rest-api-spec/test/v7compat/search/10_type_query.yml b/rest-api-spec/src/yamlRestCompatTest/resources/rest-api-spec/test/v7compat/search/10_type_query.yml new file mode 100644 index 0000000000000..fa4e20fdfa6fe --- /dev/null +++ b/rest-api-spec/src/yamlRestCompatTest/resources/rest-api-spec/test/v7compat/search/10_type_query.yml @@ -0,0 +1,52 @@ +--- +setup: + - skip: + features: + - "headers" + - "allowed_warnings_regex" +--- +type query throws exception when used: + - do: + index: + index: "test1" + id: 1 + type: "cat" + refresh: true + body: + foo: "bar" + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" + Accept: "application/vnd.elasticsearch+json;compatible-with=7" + allowed_warnings_regex: + - "\\[types removal\\].*" + + - do: + catch: /\[types removal\] Type queries are deprecated, prefer to filter on a field instead./ + search: + rest_total_hits_as_int: true + index: "test1" + body: + query: + type: + value: "cat" + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" + Accept: "application/vnd.elasticsearch+json;compatible-with=7" + allowed_warnings_regex: + - "\\[types removal\\].*" + + - do: + catch: /\[types removal\] Type queries are deprecated, prefer to filter on a field instead./ + search: + rest_total_hits_as_int: true + index: "test1" + body: + query: + type: + value: "_doc" + headers: + Content-Type: "application/vnd.elasticsearch+json;compatible-with=7" + Accept: "application/vnd.elasticsearch+json;compatible-with=7" + allowed_warnings_regex: + - "\\[types removal\\].*" + diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index a4856bacd2800..40722a09b08bb 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -85,8 +85,8 @@ public class Version implements Comparable, ToXContentFragment { public static final 
Version V_7_13_2 = new Version(7130299, org.apache.lucene.util.Version.LUCENE_8_8_2); public static final Version V_7_13_3 = new Version(7130399, org.apache.lucene.util.Version.LUCENE_8_8_2); public static final Version V_7_13_4 = new Version(7130499, org.apache.lucene.util.Version.LUCENE_8_8_2); - public static final Version V_7_13_5 = new Version(7130599, org.apache.lucene.util.Version.LUCENE_8_8_2); public static final Version V_7_14_0 = new Version(7140099, org.apache.lucene.util.Version.LUCENE_8_9_0); + public static final Version V_7_14_1 = new Version(7140199, org.apache.lucene.util.Version.LUCENE_8_9_0); public static final Version V_7_15_0 = new Version(7150099, org.apache.lucene.util.Version.LUCENE_8_9_0); public static final Version V_8_0_0 = new Version(8000099, org.apache.lucene.util.Version.LUCENE_8_9_0); public static final Version CURRENT = V_8_0_0; diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index 46fca6cafaf26..923c54d124383 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -52,24 +52,23 @@ /** * Represents the current state of the cluster. *
<p>
- * The cluster state object is immutable with the exception of the {@link RoutingNodes} structure, which is - * built on demand from the {@link RoutingTable}. - * The cluster state can be updated only on the master node. All updates are performed by on a - * single thread and controlled by the {@link ClusterService}. After every update the - * {@link Discovery#publish} method publishes a new version of the cluster state to all other nodes in the - * cluster. The actual publishing mechanism is delegated to the {@link Discovery#publish} method and depends on - * the type of discovery. + * The cluster state object is immutable with the exception of the {@link RoutingNodes} structure, which is built on demand from the {@link + * RoutingTable}. The cluster state can be updated only on the master node. All updates are performed on a single thread and controlled + * by the {@link ClusterService}. After every update the {@link Discovery#publish} method publishes a new version of the cluster state to + * all other nodes in the cluster. *
<p>
- * The cluster state implements the {@link Diffable} interface in order to support publishing of cluster state - * differences instead of the entire state on each change. The publishing mechanism should only send differences - * to a node if this node was present in the previous version of the cluster state. If a node was - * not present in the previous version of the cluster state, this node is unlikely to have the previous cluster - * state version and should be sent a complete version. In order to make sure that the differences are applied to the - * correct version of the cluster state, each cluster state version update generates {@link #stateUUID} that uniquely - * identifies this version of the state. This uuid is verified by the {@link ClusterStateDiff#apply} method to - * make sure that the correct diffs are applied. If uuids don’t match, the {@link ClusterStateDiff#apply} method - * throws the {@link IncompatibleClusterStateVersionException}, which causes the publishing mechanism to send + * Implements the {@link Diffable} interface in order to support publishing of cluster state differences instead of the entire state on each + * change. The publishing mechanism only sends differences to a node if this node was present in the previous version of the cluster state. + * If a node was not present in the previous version of the cluster state, this node is unlikely to have the previous cluster state version + * and should be sent a complete version. In order to make sure that the differences are applied to the correct version of the cluster + * state, each cluster state version update generates {@link #stateUUID} that uniquely identifies this version of the state. This uuid is + * verified by the {@link ClusterStateDiff#apply} method to make sure that the correct diffs are applied. If uuids don’t match, the {@link + * ClusterStateDiff#apply} method throws the {@link IncompatibleClusterStateVersionException}, which causes the publishing mechanism to send * a full version of the cluster state to the node on which this exception was thrown. + *
<p>
+ * Implements {@link ToXContentFragment} to be exposed in REST APIs (e.g. {@code GET _cluster/state} and {@code POST _cluster/reroute}) and + * to be indexed by monitoring, mostly just for diagnostics purposes. The XContent representation does not need to be 100% faithful since we + * never reconstruct a cluster state from its XContent representation, but the more faithful it is the more useful it is for diagnostics. */ public class ClusterState implements ToXContentFragment, Diffable { @@ -85,6 +84,13 @@ default boolean isPrivate() { return false; } + /** + * Serialize this {@link Custom} for diagnostic purposes, exposed by the
<pre>GET _cluster/state</pre>
API etc. The XContent + * representation does not need to be 100% faithful since we never reconstruct a cluster state from its XContent representation, but + * the more faithful it is the more useful it is for diagnostics. + */ + @Override + XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException; } private static final NamedDiffableValueSerializer CUSTOM_VALUE_SERIALIZER = new NamedDiffableValueSerializer<>(Custom.class); diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java index aa864df0423fe..cb315ab85892a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java @@ -933,7 +933,22 @@ private void writeShardSnapshotStatus(XContentBuilder builder, ToXContent indexI builder.field("index", indexId); builder.field("shard", shardId); builder.field("state", status.state()); + builder.field("generation", status.generation()); builder.field("node", status.nodeId()); + + if (status.state() == ShardState.SUCCESS) { + final ShardSnapshotResult result = status.shardSnapshotResult(); + builder.startObject("result"); + builder.field("generation", result.getGeneration()); + builder.humanReadableField("size_in_bytes", "size", result.getSize()); + builder.field("segments", result.getSegmentCount()); + builder.endObject(); + } + + if (status.reason() != null) { + builder.field("reason", status.reason()); + } + builder.endObject(); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index cedf6375532b7..8d559824966a0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -17,6 +17,7 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.Version; import org.elasticsearch.action.AliasesRequest; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.Diffable; import org.elasticsearch.cluster.DiffableUtils; @@ -26,6 +27,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.coordination.CoordinationMetadata; import org.elasticsearch.common.xcontent.NamedObjectNotFoundException; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -75,6 +77,10 @@ import static org.elasticsearch.common.settings.Settings.readSettingsFromStream; import static org.elasticsearch.common.settings.Settings.writeSettingsToStream; +/** + * {@link Metadata} is the part of the {@link ClusterState} which persists across restarts. This persistence is XContent-based, so a + * round-trip through XContent must be faithful in {@link XContentContext#GATEWAY} context. + */ public class Metadata implements Iterable, Diffable, ToXContentFragment { private static final Logger logger = LogManager.getLogger(Metadata.class); @@ -117,6 +123,10 @@ public enum XContentContext { */ public static EnumSet ALL_CONTEXTS = EnumSet.allOf(XContentContext.class); + /** + * Custom metadata that persists (via XContent) across restarts. 
The deserialization method for each implementation must be registered + * with the {@link NamedXContentRegistry}. + */ public interface Custom extends NamedDiffable, ToXContentFragment { EnumSet context(); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java index 1d62054b737c7..8ad1b684a9c7b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamService.java @@ -221,8 +221,9 @@ static ClusterState createDataStream(MetadataCreateIndexService metadataCreateIn Metadata.Builder builder = Metadata.builder(currentState.metadata()).put(newDataStream); List aliases = new ArrayList<>(); - if (template.template() != null && template.template().aliases() != null) { - for (var alias : template.template().aliases().values()) { + var resolvedAliases = MetadataIndexTemplateService.resolveAliases(currentState.metadata(), template); + for (var resolvedAliasMap : resolvedAliases) { + for (var alias : resolvedAliasMap.values()) { aliases.add(alias.getAlias()); builder.put(alias.getAlias(), dataStreamName, alias.writeIndex(), alias.filter() == null ? null : alias.filter().string()); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java index c63d29eb2ce07..787e5a9d8f90e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataCreateIndexService.java @@ -577,7 +577,7 @@ private ClusterState applyCreateIndexRequestForSystemDataStream(final ClusterSta return applyCreateIndexWithTemporaryService(currentState, request, silent, null, tmpImd, mappings, indexService -> resolveAndValidateAliases(request.index(), request.aliases(), - MetadataIndexTemplateService.resolveAliases(template, componentTemplates, null), currentState.metadata(), + MetadataIndexTemplateService.resolveAliases(template, componentTemplates), currentState.metadata(), // the context is only used for validation so it's fine to pass fake values for the // shard id and the current timestamp aliasValidator, xContentRegistry, indexService.newSearchExecutionContext(0, 0, null, () -> 0L, null, emptyMap()), diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java index de9201a0f839c..72613af1cfaf1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java @@ -1112,25 +1112,31 @@ public static List> resolveAliases(final List> resolveAliases(final Metadata metadata, final String templateName) { final ComposableIndexTemplate template = metadata.templatesV2().get(templateName); assert template != null : "attempted to resolve aliases for a template [" + templateName + "] that did not exist in the cluster state"; + return resolveAliases(metadata, template); + } + + /** + * Resolve the given v2 template into an ordered list of aliases + */ + static List> resolveAliases(final Metadata metadata, final ComposableIndexTemplate template) { if (template == null) { 
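            // a null template resolves to an empty alias list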
return List.of(); } final Map componentTemplates = metadata.componentTemplates(); - return resolveAliases(template, componentTemplates, templateName); + return resolveAliases(template, componentTemplates); } /** * Resolve the given v2 template and component templates into an ordered list of aliases */ static List> resolveAliases(final ComposableIndexTemplate template, - final Map componentTemplates, - @Nullable String templateName) { + final Map componentTemplates) { Objects.requireNonNull(template, "attempted to resolve aliases for a null template"); Objects.requireNonNull(componentTemplates, "attempted to resolve aliases with null component templates"); List> aliases = template.composedOf().stream() diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java index 912b8153f5c05..121ecff565c2a 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java @@ -92,7 +92,6 @@ protected void addDoc(LuceneDocument doc) { private final Set newFieldsSeen; private final Map dynamicObjectMappers; private final List dynamicRuntimeFields; - private final Set shadowedFields; private Field version; private SeqNoFieldMapper.SequenceIDFields seqID; @@ -108,7 +107,6 @@ private DocumentParserContext(DocumentParserContext in) { this.newFieldsSeen = in.newFieldsSeen; this.dynamicObjectMappers = in.dynamicObjectMappers; this.dynamicRuntimeFields = in.dynamicRuntimeFields; - this.shadowedFields = in.shadowedFields; this.version = in.version; this.seqID = in.seqID; } @@ -129,17 +127,6 @@ protected DocumentParserContext(MappingLookup mappingLookup, this.newFieldsSeen = new HashSet<>(); this.dynamicObjectMappers = new HashMap<>(); this.dynamicRuntimeFields = new ArrayList<>(); - this.shadowedFields = buildShadowedFields(mappingLookup); - } - - private static Set buildShadowedFields(MappingLookup lookup) { - Set shadowedFields = new HashSet<>(); - for (RuntimeField runtimeField : lookup.getMapping().getRoot().runtimeFields()) { - for (MappedFieldType mft : runtimeField.asMappedFieldTypes()) { - shadowedFields.add(mft.name()); - } - } - return shadowedFields; } public final IndexSettings indexSettings() { @@ -243,7 +230,7 @@ public final List getDynamicMappers() { } public final boolean isShadowed(String field) { - return shadowedFields.contains(field); + return mappingLookup.isShadowed(field); } public final ObjectMapper getObjectMapper(String name) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java b/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java index 61594fa0144da..d3acede018309 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappingLookup.java @@ -16,6 +16,7 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -51,6 +52,7 @@ private CacheKey() {} private final Map indexAnalyzersMap = new HashMap<>(); private final List indexTimeScriptMappers = new ArrayList<>(); private final Mapping mapping; + private final Set shadowedFields; /** * Creates a new {@link MappingLookup} instance by parsing the provided mapping and extracting its field definitions. 
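For context (an illustrative sketch, not part of this change; index and field names are hypothetical), a mapped field is "shadowed" when a runtime field of the same name is defined on top of it, in which case the runtime definition wins at search time. The script here assumes `_source` holds a numeric millisecond value:

[source,console]
--------------------------------------------------
PUT my-index-000001
{
  "mappings": {
    "runtime": {
      "duration": {
        "type": "double",
        "script": { "source": "emit(params._source['duration'] * 0.001)" }
      }
    },
    "properties": {
      "duration": { "type": "long" }
    }
  }
}
--------------------------------------------------
// NOTCONSOLE

Precomputing this relationship once in `MappingLookup` lets `DocumentParserContext#isShadowed` delegate to it instead of rebuilding the set on every parse.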
@@ -157,6 +159,13 @@ private MappingLookup(Mapping mapping, } } + this.shadowedFields = new HashSet<>(); + for (RuntimeField runtimeField : mapping.getRoot().runtimeFields()) { + for (MappedFieldType mft : runtimeField.asMappedFieldTypes()) { + shadowedFields.add(mft.name()); + } + } + this.fieldTypeLookup = new FieldTypeLookup(mappers, aliasMappers, mapping.getRoot().runtimeFields()); this.indexTimeLookup = new FieldTypeLookup(mappers, aliasMappers, Collections.emptyList()); this.fieldMappers = Collections.unmodifiableMap(fieldMappers); @@ -199,6 +208,13 @@ public Iterable fieldMappers() { return fieldMappers.values(); } + /** + * @return {@code true} if the given field is shadowed by a runtime field + */ + public boolean isShadowed(String field) { + return shadowedFields.contains(field); + } + void checkLimits(IndexSettings settings) { checkFieldLimit(settings.getMappingTotalFieldsLimit()); checkObjectDepthLimit(settings.getMappingDepthLimit()); diff --git a/server/src/main/java/org/elasticsearch/index/query/TypeQueryV7Builder.java b/server/src/main/java/org/elasticsearch/index/query/TypeQueryV7Builder.java new file mode 100644 index 0000000000000..915662f4e9893 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/query/TypeQueryV7Builder.java @@ -0,0 +1,97 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.index.query; + +import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.Query; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ParseField; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.core.RestApiVersion; +import org.elasticsearch.index.mapper.MapperService; + +import java.io.IOException; + +public class TypeQueryV7Builder extends AbstractQueryBuilder { + private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(TypeQueryV7Builder.class); + public static final String TYPES_DEPRECATION_MESSAGE = "[types removal] Type queries are deprecated, " + + "prefer to filter on a field instead."; + + private static final String NAME = "type"; + public static final ParseField NAME_V7 = new ParseField(NAME).forRestApiVersion(RestApiVersion.equalTo(RestApiVersion.V_7)); + private static final ParseField VALUE_FIELD = new ParseField("value"); + private static final ObjectParser PARSER = new ObjectParser<>(NAME, TypeQueryV7Builder::new); + + static { + PARSER.declareString(QueryBuilder::queryName, + AbstractQueryBuilder.NAME_FIELD.forRestApiVersion(RestApiVersion.equalTo(RestApiVersion.V_7))); + PARSER.declareFloat(QueryBuilder::boost, + AbstractQueryBuilder.BOOST_FIELD.forRestApiVersion(RestApiVersion.equalTo(RestApiVersion.V_7))); + PARSER.declareString(TypeQueryV7Builder::setValue, + VALUE_FIELD.forRestApiVersion(RestApiVersion.equalTo(RestApiVersion.V_7))); + } + + private String value; + + public TypeQueryV7Builder() { + } + + 
/** + * Read from a stream. + */ + public TypeQueryV7Builder(StreamInput in) throws IOException { + super(in); + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + } + + @Override + protected void doXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(NAME); + builder.field(VALUE_FIELD.getPreferredName(), MapperService.SINGLE_MAPPING_NAME); + printBoostAndQueryName(builder); + builder.endObject(); + } + + @Override + protected Query doToQuery(SearchExecutionContext context) throws IOException { + return new MatchNoDocsQuery(); + } + + @Override + protected boolean doEquals(TypeQueryV7Builder other) { + return true; + } + + @Override + protected int doHashCode() { + return 0; + } + + public static TypeQueryV7Builder fromXContent(XContentParser parser) throws IOException { + deprecationLogger.compatibleApiWarning("type_query", TYPES_DEPRECATION_MESSAGE); + throw new ParsingException(parser.getTokenLocation(), TYPES_DEPRECATION_MESSAGE); + } + + @Override + public String getWriteableName() { + return NAME; + } + + public void setValue(String value){ + this.value = value; + } +} diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index 40e25d0cd670d..b1ecd5f61460d 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -11,7 +11,6 @@ import org.apache.lucene.search.BooleanQuery; import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.NamedRegistry; -import org.elasticsearch.common.xcontent.ParseField; import org.elasticsearch.common.geo.GeoShapeType; import org.elasticsearch.common.geo.ShapesAvailability; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -21,8 +20,10 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ParseField; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.BoostingQueryBuilder; import org.elasticsearch.index.query.CombinedFieldsQueryBuilder; @@ -67,6 +68,7 @@ import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.index.query.TermsQueryBuilder; import org.elasticsearch.index.query.TermsSetQueryBuilder; +import org.elasticsearch.index.query.TypeQueryV7Builder; import org.elasticsearch.index.query.WildcardQueryBuilder; import org.elasticsearch.index.query.WrapperQueryBuilder; import org.elasticsearch.index.query.functionscore.ExponentialDecayFunctionBuilder; @@ -839,6 +841,10 @@ private void registerQueryParsers(List plugins) { } registerFromPlugin(plugins, SearchPlugin::getQueries, this::registerQuery); + + if (RestApiVersion.minimumSupported() == RestApiVersion.V_7) { + registerQuery(new QuerySpec<>(TypeQueryV7Builder.NAME_V7, TypeQueryV7Builder::new, TypeQueryV7Builder::fromXContent)); + } } private void registerIntervalsSourceProviders() { @@ -893,4 +899,5 @@ private void registerBoolQuery(ParseField name, Writeable.Reader r public FetchPhase getFetchPhase() { return new FetchPhase(fetchSubPhases); } + } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java 
b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 9f4133fa7d4b8..3b69d14ab2bb5 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -2204,7 +2204,7 @@ public void clusterStateProcessed(String source, ClusterState oldState, ClusterS } } } - }, "delete snapshot", listener::onFailure); + }, "delete snapshot [" + repository + "]" + Arrays.toString(snapshotNames), listener::onFailure); } private static List matchingSnapshotIds( diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java index 3edadb8c35637..9f8b20b180a40 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataCreateDataStreamServiceTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate.DataStreamTemplate; import org.elasticsearch.cluster.metadata.MetadataCreateDataStreamService.CreateDataStreamClusterStateUpdateRequest; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; import org.elasticsearch.indices.ExecutorNames; @@ -75,13 +76,8 @@ public void testCreateDataStreamWithAliasFromTemplate() throws Exception { final int aliasCount = randomIntBetween(0, 3); Map aliases = new HashMap<>(aliasCount); for (int k = 0; k < aliasCount; k++) { - final String aliasName = randomAlphaOfLength(6); - var builder = AliasMetadata.newAliasMetadataBuilder(aliasName); - if (randomBoolean()) { - builder.filter(Map.of("term", Map.of("user", Map.of("value", randomAlphaOfLength(5))))); - } - builder.writeIndex(randomBoolean()); - aliases.put(aliasName, builder.build()); + final AliasMetadata am = randomAlias(null); + aliases.put(am.alias(), am); } ComposableIndexTemplate template = new ComposableIndexTemplate.Builder() .indexPatterns(List.of(dataStreamName + "*")) @@ -118,6 +114,83 @@ public void testCreateDataStreamWithAliasFromTemplate() throws Exception { assertThat(newState.metadata().index(DataStream.getDefaultBackingIndexName(dataStreamName, 1)).isSystem(), is(false)); } + public void testCreateDataStreamWithAliasFromComponentTemplate() throws Exception { + final MetadataCreateIndexService metadataCreateIndexService = getMetadataCreateIndexService(); + final String dataStreamName = "my-data-stream"; + final int componentTemplateCount = randomIntBetween(0, 3); + final int aliasCount = randomIntBetween(0, 3); + int totalAliasCount = aliasCount; + Map aliases = new HashMap<>(); + for (int k = 0; k < aliasCount; k++) { + final AliasMetadata am = randomAlias(null); + aliases.put(am.alias(), am); + } + + List ctNames = new ArrayList<>(); + List> allAliases = new ArrayList<>(); + var metadataBuilder = Metadata.builder(); + final List componentTemplates = new ArrayList<>(componentTemplateCount); + for (int k = 0; k < componentTemplateCount; k++) { + final String ctName = randomAlphaOfLength(5); + ctNames.add(ctName); + final int ctAliasCount = randomIntBetween(0, 3); + totalAliasCount += ctAliasCount; + final var ctAliasMap = new HashMap(ctAliasCount); + allAliases.add(ctAliasMap); + for (int m = 0; m < ctAliasCount; m++) { + final AliasMetadata am = randomAlias(ctName); + 
ctAliasMap.put(am.alias(), am); + } + metadataBuilder.put(ctName, new ComponentTemplate(new Template(null, null, ctAliasMap), null, null)); + } + allAliases.add(aliases); + + ComposableIndexTemplate template = new ComposableIndexTemplate.Builder() + .indexPatterns(List.of(dataStreamName + "*")) + .dataStreamTemplate(new DataStreamTemplate()) + .template(new Template(null, null, aliases)) + .componentTemplates(ctNames) + .build(); + + ClusterState cs = ClusterState.builder(new ClusterName("_name")) + .metadata(metadataBuilder.put("template", template).build()) + .build(); + CreateDataStreamClusterStateUpdateRequest req = + new CreateDataStreamClusterStateUpdateRequest(dataStreamName, TimeValue.ZERO, TimeValue.ZERO); + ClusterState newState = MetadataCreateDataStreamService.createDataStream(metadataCreateIndexService, cs, req); + assertThat(newState.metadata().dataStreams().size(), equalTo(1)); + assertThat(newState.metadata().dataStreams().get(dataStreamName).getName(), equalTo(dataStreamName)); + assertThat(newState.metadata().dataStreams().get(dataStreamName).isSystem(), is(false)); + assertThat(newState.metadata().dataStreams().get(dataStreamName).isHidden(), is(false)); + assertThat(newState.metadata().dataStreams().get(dataStreamName).isReplicated(), is(false)); + assertThat(newState.metadata().dataStreamAliases().size(), is(totalAliasCount)); + for (var aliasMap : allAliases) { + for (var alias : aliasMap.values()) { + var actualAlias = newState.metadata().dataStreamAliases().get(alias.alias()); + assertThat(actualAlias, is(notNullValue())); + assertThat(actualAlias.getName(), equalTo(alias.alias())); + assertThat(actualAlias.getFilter(), equalTo(alias.filter())); + assertThat(actualAlias.getWriteDataStream(), equalTo(alias.writeIndex() ? dataStreamName : null)); + } + } + + assertThat(newState.metadata().index(DataStream.getDefaultBackingIndexName(dataStreamName, 1)), notNullValue()); + assertThat(newState.metadata().index(DataStream.getDefaultBackingIndexName(dataStreamName, 1)).getAliases().size(), is(0)); + assertThat(newState.metadata().index(DataStream.getDefaultBackingIndexName(dataStreamName, 1)).getSettings().get("index.hidden"), + equalTo("true")); + assertThat(newState.metadata().index(DataStream.getDefaultBackingIndexName(dataStreamName, 1)).isSystem(), is(false)); + } + + private static AliasMetadata randomAlias(String prefix) { + final String aliasName = (Strings.isNullOrEmpty(prefix) ? 
"" : prefix + "-") + randomAlphaOfLength(6); + var builder = AliasMetadata.newAliasMetadataBuilder(aliasName); + if (randomBoolean()) { + builder.filter(Map.of("term", Map.of("user", Map.of("value", randomAlphaOfLength(5))))); + } + builder.writeIndex(randomBoolean()); + return builder.build(); + } + public void testCreateSystemDataStream() throws Exception { final MetadataCreateIndexService metadataCreateIndexService = getMetadataCreateIndexService(); final String dataStreamName = ".system-data-stream"; diff --git a/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java b/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java index 9241b803acabe..e74dd18e767b3 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.index.query.TypeQueryV7Builder; import org.elasticsearch.index.query.functionscore.GaussDecayFunctionBuilder; import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.search.aggregations.AggregationBuilder; @@ -241,11 +242,14 @@ public void testRegisteredQueries() { List allSupportedQueries = new ArrayList<>(); Collections.addAll(allSupportedQueries, NON_DEPRECATED_QUERIES); Collections.addAll(allSupportedQueries, DEPRECATED_QUERIES); + Collections.addAll(allSupportedQueries, REST_COMPATIBLE_QUERIES); + SearchModule module = new SearchModule(Settings.EMPTY, emptyList()); Set registeredNonDeprecated = module.getNamedXContents().stream() .filter(e -> e.categoryClass.equals(QueryBuilder.class)) .filter(e -> e.name.getAllReplacedWith() == null) + .filter(e -> RestApiVersion.current().matches(e.restApiCompatibility)) .map(e -> e.name.getPreferredName()) .collect(toSet()); Set registeredAll = module.getNamedXContents().stream() @@ -389,6 +393,7 @@ public CheckedBiConsumer getReque //add here deprecated queries to make sure we log a deprecation warnings when they are used private static final String[] DEPRECATED_QUERIES = new String[] {"field_masking_span", "geo_polygon"}; + private static final String[] REST_COMPATIBLE_QUERIES = new String[] {TypeQueryV7Builder.NAME_V7.getPreferredName()}; /** * Dummy test {@link AggregationBuilder} used to test registering aggregation builders. 
@@ -669,14 +674,15 @@ public List<QuerySpec<?>> getQueries() { .filter(e -> RestApiVersion.minimumSupported().matches(e.restApiCompatibility)) .filter(e -> RestApiVersion.current().matches(e.restApiCompatibility)) .collect(toSet()), - hasSize(searchModule.getNamedXContents().size() - 1)); + hasSize(searchModule.getNamedXContents().size() - REST_COMPATIBLE_QUERIES.length - 1)); final List<NamedXContentRegistry.Entry> compatEntry = searchModule.getNamedXContents().stream() .filter(e -> e.categoryClass.equals(QueryBuilder.class) && - e.name.match(CompatQueryBuilder.NAME_OLD.getPreferredName(), LoggingDeprecationHandler.INSTANCE)) + RestApiVersion.minimumSupported().matches(e.name.getForRestApiVersion()) // v7 compatible + && RestApiVersion.current().matches(e.name.getForRestApiVersion()) == false) // but not v8 compatible .collect(toList()); - assertThat(compatEntry, hasSize(1)); + assertThat(compatEntry, hasSize(REST_COMPATIBLE_QUERIES.length + 1)); // +1 for the query registered in this test assertTrue(RestApiVersion.minimumSupported().matches(compatEntry.get(0).restApiCompatibility)); assertFalse(RestApiVersion.current().matches(compatEntry.get(0).restApiCompatibility)); } diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java index ba1aa724455ab..9850d3802b70e 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java @@ -42,6 +42,7 @@ import java.util.stream.Stream; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.equalTo; public class SnapshotsInProgressSerializationTests extends AbstractDiffableWireSerializationTestCase<Custom> { @@ -375,9 +376,13 @@ public void testXContent() throws IOException { new ShardId("index", "uuid", 0), SnapshotsInProgress.ShardSnapshotStatus.success( "nodeId", - new ShardSnapshotResult("generation", new ByteSizeValue(1L), 1) + new ShardSnapshotResult("shardgen", new ByteSizeValue(1L), 1) ) ) + .fPut( + new ShardId("index", "uuid", 1), + new SnapshotsInProgress.ShardSnapshotStatus("nodeId", ShardState.FAILED, "failure-reason", "fail-gen") + ) .build(), null, null, @@ -394,13 +399,31 @@ public void testXContent() throws IOException { String json = Strings.toString(builder); assertThat( json, - equalTo( - "{\"snapshots\":[{\"repository\":\"repo\",\"snapshot\":\"name\",\"uuid\":\"uuid\"," - + "\"include_global_state\":true,\"partial\":true,\"state\":\"SUCCESS\"," - + "\"indices\":[{\"name\":\"index\",\"id\":\"uuid\"}],\"start_time\":\"1970-01-01T00:20:34.567Z\"," - + "\"start_time_millis\":1234567,\"repository_state_id\":0," - + "\"shards\":[{\"index\":{\"index_name\":\"index\",\"index_uuid\":\"uuid\"}," - + "\"shard\":0,\"state\":\"SUCCESS\",\"node\":\"nodeId\"}],\"feature_states\":[],\"data_streams\":[]}]}" + anyOf( + equalTo( + "{\"snapshots\":[{\"repository\":\"repo\",\"snapshot\":\"name\",\"uuid\":\"uuid\"," + + "\"include_global_state\":true,\"partial\":true,\"state\":\"SUCCESS\"," + + "\"indices\":[{\"name\":\"index\",\"id\":\"uuid\"}],\"start_time\":\"1970-01-01T00:20:34.567Z\"," + + "\"start_time_millis\":1234567,\"repository_state_id\":0,\"shards\":[" + + "{\"index\":{\"index_name\":\"index\",\"index_uuid\":\"uuid\"},\"shard\":0,\"state\":\"SUCCESS\"," + + "\"generation\":\"shardgen\",\"node\":\"nodeId\"," + +
"\"result\":{\"generation\":\"shardgen\",\"size\":\"1b\",\"size_in_bytes\":1,\"segments\":1}}," + + "{\"index\":{\"index_name\":\"index\",\"index_uuid\":\"uuid\"},\"shard\":1,\"state\":\"FAILED\"," + + "\"generation\":\"fail-gen\",\"node\":\"nodeId\",\"reason\":\"failure-reason\"}" + + "],\"feature_states\":[],\"data_streams\":[]}]}" + ), // or the shards might be in the other order: + equalTo( + "{\"snapshots\":[{\"repository\":\"repo\",\"snapshot\":\"name\",\"uuid\":\"uuid\"," + + "\"include_global_state\":true,\"partial\":true,\"state\":\"SUCCESS\"," + + "\"indices\":[{\"name\":\"index\",\"id\":\"uuid\"}],\"start_time\":\"1970-01-01T00:20:34.567Z\"," + + "\"start_time_millis\":1234567,\"repository_state_id\":0,\"shards\":[" + + "{\"index\":{\"index_name\":\"index\",\"index_uuid\":\"uuid\"},\"shard\":1,\"state\":\"FAILED\"," + + "\"generation\":\"fail-gen\",\"node\":\"nodeId\",\"reason\":\"failure-reason\"}," + + "{\"index\":{\"index_name\":\"index\",\"index_uuid\":\"uuid\"},\"shard\":0,\"state\":\"SUCCESS\"," + + "\"generation\":\"shardgen\",\"node\":\"nodeId\"," + + "\"result\":{\"generation\":\"shardgen\",\"size\":\"1b\",\"size_in_bytes\":1,\"segments\":1}}" + + "],\"feature_states\":[],\"data_streams\":[]}]}" + ) ) ); } diff --git a/x-pack/docs/en/security/images/elastic-security-overview.png b/x-pack/docs/en/security/images/elastic-security-overview.png index 8813e8a76a70e..4cf6b08f5a716 100644 Binary files a/x-pack/docs/en/security/images/elastic-security-overview.png and b/x-pack/docs/en/security/images/elastic-security-overview.png differ diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java index f5c79485f1b3b..a27aa10215ef9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java @@ -153,7 +153,7 @@ public AnalysisConfig(StreamInput in) throws IOException { influencers = Collections.unmodifiableList(in.readStringList()); multivariateByFields = in.readOptionalBoolean(); - if (in.getVersion().onOrAfter(Version.V_8_0_0)) { + if (in.getVersion().onOrAfter(Version.V_7_15_0)) { modelPruneWindow = in.readOptionalTimeValue(); } else { modelPruneWindow = null; @@ -179,7 +179,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalBoolean(multivariateByFields); - if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + if (out.getVersion().onOrAfter(Version.V_7_15_0)) { out.writeOptionalTimeValue(modelPruneWindow); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java index 2043f32715409..d4bf887df3831 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobUpdate.java @@ -169,7 +169,7 @@ public JobUpdate(StreamInput in) throws IOException { allowLazyOpen = in.readOptionalBoolean(); blocked = in.readOptionalWriteable(Blocked::new); - if (in.getVersion().onOrAfter(Version.V_8_0_0)) { + if (in.getVersion().onOrAfter(Version.V_7_15_0)) { modelPruneWindow = in.readOptionalTimeValue(); } else { modelPruneWindow = null; @@ -217,7 +217,7 @@ public void writeTo(StreamOutput out) throws IOException { 
out.writeOptionalBoolean(allowLazyOpen); out.writeOptionalWriteable(blocked); - if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + if (out.getVersion().onOrAfter(Version.V_7_15_0)) { out.writeOptionalTimeValue(modelPruneWindow); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/enrollment/TransportKibanaEnrollmentAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/enrollment/TransportKibanaEnrollmentAction.java index b882a7a80a5ec..1ca47ab7a8911 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/enrollment/TransportKibanaEnrollmentAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/enrollment/TransportKibanaEnrollmentAction.java @@ -91,7 +91,7 @@ public class TransportKibanaEnrollmentAction extends HandledTransportAction originalKeysAndCerts = CertParsingUtils.readPkcs12KeyPairs(original, originalPassword, p -> originalPassword); Certificate deserializedCert = CertParsingUtils.readCertificates( - new ByteArrayInputStream(Base64.getUrlDecoder().decode(cert.getBytes(StandardCharsets.UTF_8)))).get(0); + new ByteArrayInputStream(Base64.getDecoder().decode(cert.getBytes(StandardCharsets.UTF_8)))).get(0); assertThat(originalKeysAndCerts, hasKey(deserializedCert)); assertThat(deserializedCert, instanceOf(X509Certificate.class)); if (isCa) { diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/GroupByOptimizerTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/GroupByOptimizerTests.java index eb7263f040518..31cca2be8ef71 100644 --- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/GroupByOptimizerTests.java +++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/pivot/GroupByOptimizerTests.java @@ -43,13 +43,13 @@ public void testOneGroupBy() { public void testOrderByType() { Map<String, SingleGroupSource> groups = new LinkedHashMap<>(); - groups.put("terms1", randomTermsGroupSourceNoScript()); - groups.put("date1", randomDateHistogramGroupSourceNoScript()); - groups.put("terms2", randomTermsGroupSourceNoScript()); - groups.put("date2", randomDateHistogramGroupSourceNoScript()); - groups.put("hist1", randomHistogramGroupSourceNoScript()); + groups.put("terms1", randomTermsGroupSourceNoScript("t1")); + groups.put("date1", randomDateHistogramGroupSourceNoScript("d1")); + groups.put("terms2", randomTermsGroupSourceNoScript("t2")); + groups.put("date2", randomDateHistogramGroupSourceNoScript("d2")); + groups.put("hist1", randomHistogramGroupSourceNoScript("h1")); groups.put("geo1", randomGeoTileGroupSource()); - groups.put("hist2", randomHistogramGroupSourceNoScript()); + groups.put("hist2", randomHistogramGroupSourceNoScript("h2")); List<String> groupNames = GroupByOptimizer.reorderGroups(Collections.unmodifiableMap(groups), Collections.emptySet()) .stream()