Merge branch 'master' into ccr
* master:
  silence InstallPluginCommandTests, see #30900
  Remove left-over comment
  Fix double semicolon in import statement
  [TEST] Fix minor random bug from #30794
  Include size of snapshot in snapshot metadata #18543, bwc clean up (#30890)
  Enabling testing against an external cluster (#30885)
  Add public key header/footer (#30877)
  SQL: Remove the last remaining server dependencies from jdbc (#30771)
  Include size of snapshot in snapshot metadata (#29602)
  Do not serialize basic license exp in x-pack info (#30848)
  Change BWC version for VerifyRepositoryResponse (#30796)
  [DOCS] Document index name limitations (#30826)
  Harmonize include_defaults tests (#30700)
dnhatn committed May 28, 2018
2 parents 345b496 + 6577f5b commit 03e3bd2
Showing 57 changed files with 1,075 additions and 962 deletions.
@@ -70,31 +70,44 @@ public class RestIntegTestTask extends DefaultTask {
runner.parallelism = '1'
runner.include('**/*IT.class')
runner.systemProperty('tests.rest.load_packaged', 'false')
// we pass all nodes to the rest cluster to allow the clients to round-robin between them
// this is more realistic than just talking to a single node
runner.systemProperty('tests.rest.cluster', "${-> nodes.collect{it.httpUri()}.join(",")}")
runner.systemProperty('tests.config.dir', "${-> nodes[0].pathConf}")
// TODO: our "client" qa tests currently use the rest-test plugin. instead they should have their own plugin
// that sets up the test cluster and passes this transport uri instead of http uri. Until then, we pass
// both as separate sysprops
runner.systemProperty('tests.cluster', "${-> nodes[0].transportUri()}")

// dump errors and warnings from cluster log on failure
TaskExecutionAdapter logDumpListener = new TaskExecutionAdapter() {
@Override
void afterExecute(Task task, TaskState state) {
if (state.failure != null) {
for (NodeInfo nodeInfo : nodes) {
printLogExcerpt(nodeInfo)

if (System.getProperty("tests.rest.cluster") == null) {
if (System.getProperty("tests.cluster") != null) {
throw new IllegalArgumentException("tests.rest.cluster and tests.cluster must both be null or non-null")
}
// we pass all nodes to the rest cluster to allow the clients to round-robin between them
// this is more realistic than just talking to a single node
runner.systemProperty('tests.rest.cluster', "${-> nodes.collect{it.httpUri()}.join(",")}")
runner.systemProperty('tests.config.dir', "${-> nodes[0].pathConf}")
// TODO: our "client" qa tests currently use the rest-test plugin. instead they should have their own plugin
// that sets up the test cluster and passes this transport uri instead of http uri. Until then, we pass
// both as separate sysprops
runner.systemProperty('tests.cluster', "${-> nodes[0].transportUri()}")

// dump errors and warnings from cluster log on failure
TaskExecutionAdapter logDumpListener = new TaskExecutionAdapter() {
@Override
void afterExecute(Task task, TaskState state) {
if (state.failure != null) {
for (NodeInfo nodeInfo : nodes) {
printLogExcerpt(nodeInfo)
}
}
}
}
}
runner.doFirst {
project.gradle.addListener(logDumpListener)
}
runner.doLast {
project.gradle.removeListener(logDumpListener)
runner.doFirst {
project.gradle.addListener(logDumpListener)
}
runner.doLast {
project.gradle.removeListener(logDumpListener)
}
} else {
if (System.getProperty("tests.cluster") == null) {
throw new IllegalArgumentException("tests.rest.cluster and tests.cluster must both be null or non-null")
}
// an external cluster was specified and all responsibility for cluster configuration is taken by the user
runner.systemProperty('tests.rest.cluster', System.getProperty("tests.rest.cluster"))
runner.systemProperty('tests.cluster', System.getProperty("tests.cluster"))
}
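// For illustration only (hypothetical task path and host names, not part of this change),
// the two modes above might be exercised like so:
//   gradle :qa:some-project:integTest -Dtests.rest.cluster=localhost:9200 -Dtests.cluster=localhost:9300   // reuse an externally managed cluster
//   gradle :qa:some-project:integTest                                                                      // let the build stand up its own test cluster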

// copy the rest spec/tests into the test resources
@@ -109,7 +122,10 @@ public class RestIntegTestTask extends DefaultTask {
clusterInit.enabled = false
return // no need to add cluster formation tasks if the task won't run!
}
nodes = ClusterFormationTasks.setup(project, "${name}Cluster", runner, clusterConfig)
// only create the cluster if needed; otherwise an external cluster was specified for use
if (System.getProperty("tests.rest.cluster") == null) {
nodes = ClusterFormationTasks.setup(project, "${name}Cluster", runner, clusterConfig)
}
super.dependsOn(runner.finalizedBy)
}
}
@@ -47,6 +47,7 @@
import org.elasticsearch.env.Environment;

import java.io.BufferedReader;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
@@ -71,7 +72,6 @@
import java.nio.file.attribute.PosixFilePermissions;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.security.Security;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Base64;
@@ -543,8 +543,8 @@ void verifySignature(final Path zip, final String urlString) throws IOException,
InputStream fin = pluginZipInputStream(zip);
// sin is a URL stream to the signature corresponding to the downloaded plugin zip
InputStream sin = urlOpenStream(ascUrl);
// pin is a decoded base64 stream over the embedded public key in RFC2045 format
InputStream pin = Base64.getMimeDecoder().wrap(getPublicKey())) {
// pin is an input stream to the public key in ASCII-Armor format (RFC4880); the armored data is in RFC2045 format
InputStream pin = getPublicKey()) {
final JcaPGPObjectFactory factory = new JcaPGPObjectFactory(PGPUtil.getDecoderStream(sin));
final PGPSignature signature = ((PGPSignatureList) factory.nextObject()).get(0);

@@ -555,7 +555,19 @@ void verifySignature(final Path zip, final String urlString) throws IOException,
}

// compute the signature of the downloaded plugin zip
final PGPPublicKeyRingCollection collection = new PGPPublicKeyRingCollection(pin, new JcaKeyFingerprintCalculator());
final List<String> lines =
new BufferedReader(new InputStreamReader(pin, StandardCharsets.UTF_8)).lines().collect(Collectors.toList());
// skip armor headers and possible blank line
int index = 1;
for (; index < lines.size(); index++) {
if (lines.get(index).matches(".*: .*") == false && lines.get(index).matches("\\s*") == false) {
break;
}
}
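// index now points at the first line of base64-encoded key data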
final byte[] armoredData =
lines.subList(index, lines.size() - 1).stream().collect(Collectors.joining("\n")).getBytes(StandardCharsets.UTF_8);
final InputStream ain = Base64.getMimeDecoder().wrap(new ByteArrayInputStream(armoredData));
final PGPPublicKeyRingCollection collection = new PGPPublicKeyRingCollection(ain, new JcaKeyFingerprintCalculator());
final PGPPublicKey key = collection.getPublicKey(signature.getKeyID());
signature.init(new JcaPGPContentVerifierBuilderProvider().setProvider(new BouncyCastleProvider()), key);
final byte[] buffer = new byte[1024];
@@ -597,7 +609,7 @@ String getPublicKeyId() {
* @return an input stream to the public key
*/
InputStream getPublicKey() {
return InstallPluginCommand.class.getResourceAsStream("/public_key");
return InstallPluginCommand.class.getResourceAsStream("/public_key.asc");
}

/**
@@ -1,3 +1,7 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: SKS 1.1.6
Comment: Hostname: pgp.mit.edu

mQENBFI3HsoBCADXDtbNJnxbPqB1vDNtCsqhe49vFYsZN9IOZsZXgp7aHjh6CJBDA+bGFOwy
hbd7at35jQjWAw1O3cfYsKAmFy+Ar3LHCMkV3oZspJACTIgCrwnkic/9CUliQe324qvObU2Q
RtP4Fl0zWcfb/S8UYzWXWIFuJqMvE9MaRY1bwUBvzoqavLGZj3SF1SPO+TB5QrHkrQHBsmX+
@@ -22,3 +26,4 @@ EyUJ8SKsaHh4jV9wp9KmC8C+9CwMukL7vM5w8cgvJoAwsp3Fn59AxWthN3XJYcnMfStkIuWg
R7U2r+a210W6vnUxU4oN0PmMcursYPyeV0NX/KQeUeNMwGTFB6QHS/anRaGQewijkrYYoTNt
fllxIu9XYmiBERQ/qPDlGRlOgVTd9xUfHFkzB52c70E=
=92oX
-----END PGP PUBLIC KEY BLOCK-----
@@ -23,6 +23,7 @@
import com.google.common.jimfs.Configuration;
import com.google.common.jimfs.Jimfs;
import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
import org.bouncycastle.bcpg.ArmoredOutputStream;
import org.bouncycastle.bcpg.BCPGOutputStream;
import org.bouncycastle.bcpg.HashAlgorithmTags;
@@ -115,6 +116,7 @@
import static org.hamcrest.Matchers.not;

@LuceneTestCase.SuppressFileSystems("*")
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30900")
public class InstallPluginCommandTests extends ESTestCase {

private InstallPluginCommand skipJarHellCommand;
@@ -893,12 +895,7 @@ InputStream getPublicKey() {
final ArmoredOutputStream armored = new ArmoredOutputStream(output);
secretKey.getPublicKey().encode(armored);
armored.close();
final String publicKey = new String(output.toByteArray(), "UTF-8");
int start = publicKey.indexOf("\n", 1 + publicKey.indexOf("\n"));
int end = publicKey.lastIndexOf("\n", publicKey.lastIndexOf("\n") - 1);
// strip the header (first two lines) and footer (last line)
final String substring = publicKey.substring(1 + start, end);
return new ByteArrayInputStream(substring.getBytes("UTF-8"));
return new ByteArrayInputStream(output.toByteArray());
} catch (final IOException e) {
throw new AssertionError(e);
}
50 changes: 27 additions & 23 deletions docs/reference/indices/create-index.asciidoc
@@ -1,16 +1,39 @@
[[indices-create-index]]
== Create Index

The create index API allows to instantiate an index. Elasticsearch
provides support for multiple indices, including executing operations
across several indices.
The Create Index API is used to manually create an index in Elasticsearch. All documents in Elasticsearch
are stored in one index or another.

The most basic command is the following:

[source,js]
--------------------------------------------------
PUT twitter
--------------------------------------------------
// CONSOLE

This creates an index named `twitter` with all default settings.

[NOTE]
.Index name limitations
======================================================
There are several limitations to what you can name your index. The complete list of limitations is:

- Lowercase only
- Cannot include `\`, `/`, `*`, `?`, `"`, `<`, `>`, `|`, ` ` (space character), `,`, `#`
- Indices prior to 7.0 could contain a colon (`:`), but that's been deprecated and won't be supported in 7.0+
- Cannot start with `-`, `_`, `+`
- Cannot be `.` or `..`
- Cannot be longer than 255 bytes (note it is bytes, so multi-byte characters will count towards the 255 limit faster)
======================================================
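
For example, a name such as `logs-2018-05-28` satisfies every rule above, so the following request succeeds (the name is illustrative only):

[source,js]
--------------------------------------------------
PUT logs-2018-05-28
--------------------------------------------------
// CONSOLE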

[float]
[[create-index-settings]]
=== Index Settings

Each index created can have specific settings
associated with it.
associated with it, defined in the body:

[source,js]
--------------------------------------------------
@@ -28,25 +51,6 @@ PUT twitter
<1> Default for `number_of_shards` is 1
<2> Default for `number_of_replicas` is 1 (i.e. one replica for each primary shard)

The above second curl example shows how an index called `twitter` can be
created with specific settings for it using http://www.yaml.org[YAML].
In this case, creating an index with 3 shards, each with 2 replicas. The
index settings can also be defined with http://www.json.org[JSON]:

[source,js]
--------------------------------------------------
PUT twitter
{
"settings" : {
"index" : {
"number_of_shards" : 3,
"number_of_replicas" : 2
}
}
}
--------------------------------------------------
// CONSOLE

or more simplified

[source,js]
3 changes: 2 additions & 1 deletion docs/reference/migration/migrate_7_0.asciidoc
@@ -35,7 +35,7 @@ Elasticsearch 6.x in order to be readable by Elasticsearch 7.x.
* <<breaking_70_java_changes>>
* <<breaking_70_settings_changes>>
* <<breaking_70_scripting_changes>>

* <<breaking_70_snapshotstats_changes>>

include::migrate_7_0/aggregations.asciidoc[]
include::migrate_7_0/analysis.asciidoc[]
@@ -49,3 +49,4 @@ include::migrate_7_0/api.asciidoc[]
include::migrate_7_0/java.asciidoc[]
include::migrate_7_0/settings.asciidoc[]
include::migrate_7_0/scripting.asciidoc[]
include::migrate_7_0/snapshotstats.asciidoc[]
13 changes: 13 additions & 0 deletions docs/reference/migration/migrate_7_0/snapshotstats.asciidoc
@@ -0,0 +1,13 @@
[[breaking_70_snapshotstats_changes]]
=== Snapshot stats changes

Snapshot stats details are provided in a new structured way:

* `total` section for all the files that are referenced by the snapshot.
* `incremental` section for those files that actually needed to be copied over as part of the incremental snapshotting.
* In case of a snapshot that's still in progress, there's also a `processed` section for files that are in the process of being copied.

==== Deprecated `number_of_files`, `processed_files`, `total_size_in_bytes` and `processed_size_in_bytes` snapshot stats properties have been removed

* Properties `number_of_files` and `total_size_in_bytes` are removed and should be replaced by values of nested object `total`.
* Properties `processed_files` and `processed_size_in_bytes` are removed and should be replaced by values of nested object `processed`.
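
A sketch of the new nested form (values are illustrative):

[source,js]
--------------------------------------------------
"stats": {
  "total": {
    "file_count": 8,        <1>
    "size_in_bytes": 4704   <2>
  },
  "processed": {
    "file_count": 7,        <3>
    "size_in_bytes": 4254   <4>
  }
}
--------------------------------------------------
// NOTCONSOLE
<1> previously `number_of_files`
<2> previously `total_size_in_bytes`
<3> previously `processed_files`
<4> previously `processed_size_in_bytes`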
48 changes: 48 additions & 0 deletions docs/reference/modules/snapshots.asciidoc
@@ -563,6 +563,54 @@ GET /_snapshot/my_backup/snapshot_1/_status
// CONSOLE
// TEST[continued]

The output looks similar to the following:

[source,js]
--------------------------------------------------
{
"snapshots": [
{
"snapshot": "snapshot_1",
"repository": "my_backup",
"uuid": "XuBo4l4ISYiVg0nYUen9zg",
"state": "SUCCESS",
"include_global_state": true,
"shards_stats": {
"initializing": 0,
"started": 0,
"finalizing": 0,
"done": 5,
"failed": 0,
"total": 5
},
"stats": {
"incremental": {
"file_count": 8,
"size_in_bytes": 4704
},
"processed": {
"file_count": 7,
"size_in_bytes": 4254
},
"total": {
"file_count": 8,
"size_in_bytes": 4704
},
"start_time_in_millis": 1526280280355,
"time_in_millis": 358
}
}
]
}
--------------------------------------------------
// TESTRESPONSE

The output is composed of different sections. The `stats` sub-object provides details on the number and size of files that were
snapshotted. As snapshots are incremental, copying only the Lucene segments that are not already in the repository,
the `stats` object contains a `total` section for all the files that are referenced by the snapshot, as well as an `incremental` section
for those files that actually needed to be copied over as part of the incremental snapshotting. In case of a snapshot that's still
in progress, there's also a `processed` section that contains information about the files that are in the process of being copied.

Multiple ids are also supported:

[source,sh]
6 changes: 0 additions & 6 deletions qa/smoke-test-rank-eval-with-mustache/build.gradle
@@ -25,9 +25,3 @@ dependencies {
testCompile project(path: ':modules:rank-eval', configuration: 'runtime')
testCompile project(path: ':modules:lang-mustache', configuration: 'runtime')
}

/*
* One of the integration tests doesn't work with the zip distribution
* and will be fixed later.
* Tracked by https://github.com/elastic/elasticsearch/issues/30628
*/
@@ -0,0 +1,28 @@
---
setup:
- do:
indices.create:
body:
settings:
index:
number_of_shards: 1
number_of_replicas: 1
index: test-index
---
Test retrieval of default settings:
- skip:
version: " - 6.3.99"
reason: include_defaults will not work in mixed-mode clusters containing nodes pre-6.4
- do:
indices.get_settings:
flat_settings: true
index: test-index
- is_false:
test-index.settings.index\.refresh_interval
- do:
indices.get_settings:
include_defaults: true
flat_settings: true
index: test-index
- match:
test-index.defaults.index\.refresh_interval: "1s"
@@ -23,15 +23,3 @@ Test reset index settings:
indices.get_settings:
flat_settings: false
- is_false: test-index.settings.index\.refresh_interval

# Disabled until https://github.com/elastic/elasticsearch/pull/29229 is back-ported
# That PR changed the execution path of index settings default to be on the master
# until the PR is back-ported the old master will not return default settings.
#
# - do:
# indices.get_settings:
# include_defaults: true
# flat_settings: true
# index: test-index
# - match:
# test-index.defaults.index\.refresh_interval: "1s"