Merge branch 'master' into repository-encrypted-client-side-reformated
albertzaharovits committed Dec 1, 2020
2 parents: cf8c7fd + f5fa0e3 · commit: 652a51e
Showing 407 changed files with 2,269 additions and 2,125 deletions.
16 changes: 2 additions & 14 deletions buildSrc/build.gradle
@@ -18,7 +18,6 @@
*/

import org.gradle.internal.jvm.Jvm
-import org.gradle.util.GradleVersion

plugins {
id 'java-gradle-plugin'
@@ -165,17 +164,6 @@ if (project != rootProject) {
tasks.named("thirdPartyAudit").configure {
enabled = false
}
-if (org.elasticsearch.gradle.info.BuildParams.inFipsJvm) {
-// We don't support running gradle with a JVM that is in FIPS 140 mode, so we don't test it.
-// WaitForHttpResourceTests tests would fail as they use JKS/PKCS12 keystores
-tasks.named("test").configure {
-enabled = false;
-}
-tasks.named("testingConventions").configure {
-enabled = false
-}
-}

configurations.register("distribution")
configurations.register("reaper")

@@ -244,16 +232,16 @@ if (project != rootProject) {
tasks.register("integTest", Test) {
inputs.dir(file("src/testKit")).withPropertyName("testkit dir").withPathSensitivity(PathSensitivity.RELATIVE)
systemProperty 'test.version_under_test', version
-maxParallelForks = System.getProperty('tests.jvms', org.elasticsearch.gradle.info.BuildParams.defaultParallel.toString()) as Integer
+onlyIf { org.elasticsearch.gradle.info.BuildParams.inFipsJvm == false }
testClassesDirs = sourceSets.integTest.output.classesDirs
classpath = sourceSets.integTest.runtimeClasspath
}
tasks.named("check").configure { dependsOn("integTest") }

// for now we hardcode the tests for our build to use the gradle jvm.
tasks.withType(Test).configureEach {
-onlyIf { org.elasticsearch.gradle.info.BuildParams.inFipsJvm == false }
it.executable = Jvm.current().getJavaExecutable()
+maxParallelForks = providers.systemProperty('tests.jvms').forUseAtConfigurationTime().getOrElse(org.elasticsearch.gradle.info.BuildParams.defaultParallel.toString()) as Integer
}
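Both the removed and the added `maxParallelForks` lines read the `tests.jvms` system property, so the fork count can be overridden per invocation. A minimal sketch, assuming `check` (which `dependsOn("integTest")` above) as the entry point:

[source,sh]
----
# Cap parallel test forks at 4 via the tests.jvms system property that the
# Gradle snippets above read. Task and value are illustrative, not prescriptive.
./gradlew check -Dtests.jvms=4
----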

/*
@@ -20,14 +20,13 @@

import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;

import java.io.IOException;
-import java.util.ArrayList;
import java.util.HashMap;
-import java.util.List;
import java.util.Map;

public class ExplainLifecycleResponseTests extends AbstractXContentTestCase<ExplainLifecycleResponse> {
@@ -59,8 +58,7 @@ protected boolean assertToXContentEquivalence() {

@Override
protected NamedXContentRegistry xContentRegistry() {
-List<NamedXContentRegistry.Entry> entries = new ArrayList<>(ClusterModule.getNamedXWriteables());
-entries.add(new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse));
-return new NamedXContentRegistry(entries);
+return new NamedXContentRegistry(CollectionUtils.appendToCopy(ClusterModule.getNamedXWriteables(),
+new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse)));
}
}
@@ -23,15 +23,14 @@
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;

import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
import java.util.Objects;
import java.util.function.Predicate;
import java.util.function.Supplier;
@@ -160,8 +159,7 @@ public String toString() {

@Override
protected NamedXContentRegistry xContentRegistry() {
-List<NamedXContentRegistry.Entry> entries = new ArrayList<>(ClusterModule.getNamedXWriteables());
-entries.add(new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse));
-return new NamedXContentRegistry(entries);
+return new NamedXContentRegistry(CollectionUtils.appendToCopy(ClusterModule.getNamedXWriteables(),
+new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse)));
}
}
@@ -20,14 +20,13 @@

import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;
import org.junit.Before;

import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
import java.util.function.Predicate;

public class PhaseExecutionInfoTests extends AbstractXContentTestCase<PhaseExecutionInfo> {
@@ -67,8 +66,7 @@ protected boolean supportsUnknownFields() {

@Override
protected NamedXContentRegistry xContentRegistry() {
-List<NamedXContentRegistry.Entry> entries = new ArrayList<>(ClusterModule.getNamedXWriteables());
-entries.add(new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse));
-return new NamedXContentRegistry(entries);
+return new NamedXContentRegistry(CollectionUtils.appendToCopy(ClusterModule.getNamedXWriteables(),
+new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse)));
}
}
@@ -21,14 +21,13 @@
import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.test.AbstractXContentTestCase;
import org.junit.Before;

-import java.util.ArrayList;
import java.util.Collections;
-import java.util.List;
import java.util.Map;
import java.util.function.Predicate;

@@ -70,9 +69,8 @@ protected Predicate<String> getRandomFieldsExcludeFilter() {

@Override
protected NamedXContentRegistry xContentRegistry() {
-List<NamedXContentRegistry.Entry> entries = new ArrayList<>(ClusterModule.getNamedXWriteables());
-entries.add(new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse));
-return new NamedXContentRegistry(entries);
+return new NamedXContentRegistry(CollectionUtils.appendToCopy(ClusterModule.getNamedXWriteables(),
+new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse)));
}

@Override
28 changes: 21 additions & 7 deletions docs/reference/commands/certutil.asciidoc
@@ -18,7 +18,7 @@ bin/elasticsearch-certutil
| (cert ([--ca <file_path>] | [--ca-cert <file_path> --ca-key <file_path>])
[--ca-dn <name>] [--ca-pass <password>] [--days <n>]
[--dns <domain_name>] [--in <input_file>] [--ip <ip_addresses>]
-[--keep-ca-key] [--multiple] [--name <file_name>] [--pem])
+[--keep-ca-key] [--multiple] [--name <file_name>] [--pem] [--self-signed])
| (csr [--dns <domain_name>] [--in <input_file>] [--ip <ip_addresses>]
[--name <file_name>])
@@ -73,15 +73,18 @@ directory name, you must also specify a file name in the `--name` command
parameter or in the `filename` field in an input YAML file.

You can optionally provide IP addresses or DNS names for each instance. If
-neither IP addresses nor DNS names are specified, the Elastic stack products
+neither IP addresses nor DNS names are specified, the Elastic Stack products
cannot perform hostname verification and you might need to configure the
`verification_mode` security setting to `certificate` only. For more information
about this setting, see <<security-settings>>.
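For illustration, the input YAML file described above might look like this; the field names (`name`, `dns`, `ip`) follow the text, but the values and the `--in` invocation are assumptions, not part of this commit:

[source,sh]
----
# instances.yml: one entry per instance; dns and ip are optional, and a
# "filename" field may be added when the name is not a valid file name.
cat > instances.yml <<'EOF'
instances:
  - name: "node-1"
    dns: ["node-1.example.com"]
    ip: ["10.0.0.1"]
EOF
bin/elasticsearch-certutil cert --in instances.yml
----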

-All certificates that are generated by this command are signed by a CA. You can
-provide your own CA with the `--ca` or `--ca-cert` parameters. Otherwise, the
-command automatically generates a new CA for you. For more information about
-generating a CA, see the <<certutil-ca,CA mode of this command>>.
+All certificates that are generated by this command are signed by a CA unless
+the `--self-signed` parameter is specified. You can provide your own CA with the
+`--ca` or `--ca-cert` and `--ca-key` parameters. Otherwise, the command automatically generates a new CA for you.
+deprecated:[7.11.0,"Generating certificates without specifying a CA certificate and key is deprecated. In the next major version you must provide a CA certificate unless the `--self-signed` option is specified."]
+For more information about generating a CA, see the
+<<certutil-ca,CA mode of this command>>.
+To generate self-signed certificates, use the `--self-signed` parameter.

By default, the `cert` mode produces a single PKCS#12 output file which holds
the instance certificate, the instance private key, and the CA certificate. If
@@ -211,6 +214,17 @@ wish to password-protect your PEM keys, then do not specify
`--pem`:: Generates certificates and keys in PEM format instead of PKCS#12. This
parameter cannot be used with the `csr` parameter.

+`--self-signed`:: Generates self-signed certificates. This parameter is only
+applicable to the `cert` parameter.
++
+--
+NOTE: This option is not recommended for <<ssl-tls,setting up TLS on a cluster>>.
+In fact, a self-signed certificate should be used only when you can be sure
+that a CA is definitely not needed and trust is directly given to the
+certificate itself.
+
+--
+
`-s, --silent`:: Shows minimal output.

`-v, --verbose`:: Shows verbose output.
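For illustration, a plausible invocation that combines the new `--self-signed` flag with the parameters documented above; the instance name, DNS name, and output path are hypothetical:

[source,sh]
----
# Generate a self-signed certificate and key in PEM format for one instance;
# --self-signed skips the CA entirely, as the note above describes.
bin/elasticsearch-certutil cert --self-signed --pem \
  --name node-1 --dns node-1.example.com --days 365 --out node-1.zip
----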
Expand Down Expand Up @@ -300,7 +314,7 @@ output file, there is a directory for each instance that was listed in the
file, which contains the instance certificate, instance private key, and CA
certificate.

-You an also use the YAML file to generate certificate signing requests. For
+You can also use the YAML file to generate certificate signing requests. For
example:

[source, sh]
66 changes: 33 additions & 33 deletions docs/reference/datatiers.asciidoc
@@ -2,24 +2,24 @@
[[data-tiers]]
== Data tiers

-A _data tier_ is a collection of nodes with the same data role that
-typically share the same hardware profile:
+A _data tier_ is a collection of nodes with the same data role that
+typically share the same hardware profile:

* <<content-tier, Content tier>> nodes handle the indexing and query load for content such as a product catalog.
* <<hot-tier, Hot tier>> nodes handle the indexing load for time series data such as logs or metrics
-and hold your most recent, most-frequently-accessed data.
-* <<warm-tier, Warm tier>> nodes hold time series data that is accessed less-frequently
+and hold your most recent, most-frequently-accessed data.
+* <<warm-tier, Warm tier>> nodes hold time series data that is accessed less-frequently
and rarely needs to be updated.
* <<cold-tier, Cold tier>> nodes hold time series data that is accessed occasionally and not normally updated.

-When you index documents directly to a specific index, they remain on content tier nodes indefinitely.
+When you index documents directly to a specific index, they remain on content tier nodes indefinitely.

-When you index documents to a data stream, they initially reside on hot tier nodes.
+When you index documents to a data stream, they initially reside on hot tier nodes.
You can configure <<index-lifecycle-management, {ilm}>> ({ilm-init}) policies
-to automatically transition your time series data through the hot, warm, and cold tiers
-according to your performance, resiliency and data retention requirements.
+to automatically transition your time series data through the hot, warm, and cold tiers
+according to your performance, resiliency and data retention requirements.

-A node's <<data-node, data role>> is configured in `elasticsearch.yml`.
+A node's <<data-node, data role>> is configured in `elasticsearch.yml`.
For example, the highest-performance nodes in a cluster might be assigned to both the hot and content tiers:

[source,yaml]
@@ -33,9 +33,9 @@ node.roles: ["data_hot", "data_content"]

Data stored in the content tier is generally a collection of items such as a product catalog or article archive.
Unlike time series data, the value of the content remains relatively constant over time,
-so it doesn't make sense to move it to a tier with different performance characteristics as it ages.
-Content data typically has long data retention requirements, and you want to be able to retrieve
-items quickly regardless of how old they are.
+so it doesn't make sense to move it to a tier with different performance characteristics as it ages.
+Content data typically has long data retention requirements, and you want to be able to retrieve
+items quickly regardless of how old they are.

Content tier nodes are usually optimized for query performance--they prioritize processing power over IO throughput
so they can process complex searches and aggregations and return results quickly.
@@ -49,10 +49,10 @@ New indices are automatically allocated to the <<content-tier>> unless they are
[[hot-tier]]
=== Hot tier

-The hot tier is the {es} entry point for time series data and holds your most-recent,
-most-frequently-searched time series data.
-Nodes in the hot tier need to be fast for both reads and writes,
-which requires more hardware resources and faster storage (SSDs).
+The hot tier is the {es} entry point for time series data and holds your most-recent,
+most-frequently-searched time series data.
+Nodes in the hot tier need to be fast for both reads and writes,
+which requires more hardware resources and faster storage (SSDs).
For resiliency, indices in the hot tier should be configured to use one or more replicas.

New indices that are part of a <<data-streams, data stream>> are automatically allocated to the
@@ -62,51 +62,51 @@ hot tier.
[[warm-tier]]
=== Warm tier

-Time series data can move to the warm tier once it is being queried less frequently
-than the recently-indexed data in the hot tier.
-The warm tier typically holds data from recent weeks.
+Time series data can move to the warm tier once it is being queried less frequently
+than the recently-indexed data in the hot tier.
+The warm tier typically holds data from recent weeks.
Updates are still allowed, but likely infrequent.
-Nodes in the warm tier generally don't need to be as fast as those in the hot tier.
+Nodes in the warm tier generally don't need to be as fast as those in the hot tier.
For resiliency, indices in the warm tier should be configured to use one or more replicas.

[discrete]
[[cold-tier]]
=== Cold tier

-Once data in the warm tier is no longer being updated, it can move to the cold tier.
-The cold tier typically holds the data from recent months or years.
+Once data is no longer being updated, it can move from the warm tier to the cold tier where it
+stays for the rest of its life.
The cold tier is still a responsive query tier, but data in the cold tier is not normally updated.
As data transitions into the cold tier it can be compressed and shrunken.
-For resiliency, indices in the cold tier can rely on
-<<ilm-searchable-snapshot, searchable snapshots>>, eliminating the need for replicas.
+For resiliency, indices in the cold tier can rely on
+<<ilm-searchable-snapshot, searchable snapshots>>, eliminating the need for replicas.

[discrete]
[[data-tier-allocation]]
=== Data tier index allocation

-When you create an index, by default {es} sets
+When you create an index, by default {es} sets
<<tier-preference-allocation-filter, `index.routing.allocation.include._tier_preference`>>
to `data_content` to automatically allocate the index shards to the content tier.

-When {es} creates an index as part of a <<data-streams, data stream>>,
-by default {es} sets
+When {es} creates an index as part of a <<data-streams, data stream>>,
+by default {es} sets
<<tier-preference-allocation-filter, `index.routing.allocation.include._tier_preference`>>
to `data_hot` to automatically allocate the index shards to the hot tier.

-You can override the automatic tier-based allocation by specifying
+You can override the automatic tier-based allocation by specifying
<<shard-allocation-filtering, shard allocation filtering>>
settings in the create index request or index template that matches the new index.

-You can also explicitly set `index.routing.allocation.include._tier_preference`
-to opt out of the default tier-based allocation.
+You can also explicitly set `index.routing.allocation.include._tier_preference`
+to opt out of the default tier-based allocation.
If you set the tier preference to `null`, {es} ignores the data tier roles during allocation.
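As a sketch of the override described above, the tier preference can be set explicitly at index creation; the index name, host, and the warm-then-hot fallback value are assumptions for illustration:

[source,sh]
----
# Route this index's shards to warm nodes, falling back to hot nodes if no
# warm-tier node is available (comma-separated preference list).
curl -X PUT "localhost:9200/my-index" -H 'Content-Type: application/json' -d'
{
  "settings": {
    "index.routing.allocation.include._tier_preference": "data_warm,data_hot"
  }
}'
----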

[discrete]
[[data-tier-migration]]
=== Automatic data tier migration

{ilm-init} automatically transitions managed
-indices through the available data tiers using the <<ilm-migrate, migrate>> action.
-By default, this action is automatically injected in every phase.
-You can explicitly specify the migrate action to override the default behavior,
+indices through the available data tiers using the <<ilm-migrate, migrate>> action.
+By default, this action is automatically injected in every phase.
+You can explicitly specify the migrate action to override the default behavior,
or use the <<ilm-allocate, allocate action>> to manually specify allocation rules.
2 changes: 2 additions & 0 deletions docs/reference/docs/reindex.asciidoc
@@ -444,6 +444,8 @@ include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=wait_for_completion]

include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=requests_per_second]

+include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=require-alias]
+
include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=scroll]

include::{es-repo-dir}/rest-api/common-parms.asciidoc[tag=slices]
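The newly included `require-alias` parameter suggests a reindex request can be made to fail unless its destination is an alias; a hedged sketch, with index and alias names made up and the semantics assumed from the parameter name:

[source,sh]
----
# Reindex old-logs into new-logs; require_alias=true is assumed to reject the
# request if new-logs is not an existing alias.
curl -X POST "localhost:9200/_reindex?require_alias=true" \
  -H 'Content-Type: application/json' -d'
{
  "source": { "index": "old-logs" },
  "dest":   { "index": "new-logs" }
}'
----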
2 changes: 1 addition & 1 deletion docs/reference/modules/node.asciidoc
@@ -254,7 +254,7 @@ node.roles: [ data_warm ]
[[data-cold-node]]
==== [x-pack]#Cold data node#

-Cold data nodes store read-only indices that are accessed less frequently. This tier uses less performant hardware and may leverage snapshot-backed indices to minimize the resources required.
+Cold data nodes store read-only indices that are accessed less frequently. This tier uses less performant hardware and may leverage searchable snapshot indices to minimize the resources required.

To create a dedicated cold node, set:
[source,yaml]
10 changes: 5 additions & 5 deletions docs/reference/redirects.asciidoc
@@ -1262,17 +1262,17 @@ See <<wildcard-field-type>>.
[role="exclude",id="searchable-snapshots-api-clear-cache"]
=== Clear cache API

-We have removed documentation for this API. This a low-level API used to get
-information about snapshot-backed indices. We plan to remove or drastically
-change this API as part of a future release.
+We have removed documentation for this API. This a low-level API used to clear
+the searchable snapshot cache. We plan to remove or drastically change this API
+as part of a future release.

For other searchable snapshot APIs, see <<searchable-snapshots-apis>>.

[role="exclude",id="searchable-snapshots-api-stats"]
=== Searchable snapshot statistics API

We have removed documentation for this API. This a low-level API used to get
-information about snapshot-backed indices. We plan to remove or drastically
+information about searchable snapshot indices. We plan to remove or drastically
change this API as part of a future release.

For other searchable snapshot APIs, see <<searchable-snapshots-apis>>.
@@ -1281,7 +1281,7 @@ For other searchable snapshot APIs, see <<searchable-snapshots-apis>>.
=== Searchable snapshot repository statistics API

We have removed documentation for this API. This a low-level API used to get
-information about snapshot-backed indices. We plan to remove or drastically
+information about searchable snapshot indices. We plan to remove or drastically
change this API as part of a future release.

For other searchable snapshot APIs, see <<searchable-snapshots-apis>>.