Merge branch 'master' into feature/aggregate-metrics
csoulios committed May 14, 2020
2 parents 9b77f77 + c24885d commit 046118c
Showing 122 changed files with 2,611 additions and 820 deletions.
2 changes: 2 additions & 0 deletions .ci/bwcVersions
@@ -17,6 +17,8 @@ BWC_VERSION:
- "7.6.0"
- "7.6.1"
- "7.6.2"
- "7.7.0"
- "7.7.1"
- "7.8.0"
- "7.9.0"
- "8.0.0"
2 changes: 1 addition & 1 deletion build.gradle
@@ -113,7 +113,7 @@ task updateCIBwcVersions() {
File yml = file(".ci/bwcVersions")
yml.text = ""
yml << "BWC_VERSION:\n"
-versions.indexCompatible.each {
+BuildParams.bwcVersions.indexCompatible.each {
yml << " - \"$it\"\n"
}
}

@@ -29,10 +29,8 @@
import org.gradle.api.DefaultTask;
import org.gradle.api.UncheckedIOException;
import org.gradle.api.file.FileCollection;
-import org.gradle.api.tasks.Input;
import org.gradle.api.tasks.InputFile;
import org.gradle.api.tasks.InputFiles;
-import org.gradle.api.tasks.Optional;
import org.gradle.api.tasks.OutputFile;
import org.gradle.api.tasks.TaskAction;
import org.gradle.work.ChangeType;
@@ -44,9 +42,7 @@
import java.io.PrintWriter;
import java.nio.file.Files;
import java.nio.file.StandardOpenOption;
-import java.util.Arrays;
import java.util.Collection;
-import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.Map;
@@ -59,7 +55,6 @@
public class ValidateJsonAgainstSchemaTask extends DefaultTask {

private final ObjectMapper mapper = new ObjectMapper();
-private Set<String> ignore = new HashSet<>();
private File jsonSchema;
private FileCollection inputFiles;

@@ -82,16 +77,6 @@ public void setJsonSchema(File jsonSchema) {
this.jsonSchema = jsonSchema;
}

-@Input
-@Optional
-public Set<String> getIgnore() {
-return ignore;
-}
-
-public void ignore(String... ignore) {
-this.ignore.addAll(Arrays.asList(ignore));
-}

@OutputFile
public File getReport() {
return new File(getProject().getBuildDir(), "reports/validateJson.txt");
@@ -110,9 +95,7 @@ public void validate(InputChanges inputChanges) throws IOException {
.filter(f -> f.getChangeType() != ChangeType.REMOVED)
.forEach(fileChange -> {
File file = fileChange.getFile();
-if (ignore.contains(file.getName())) {
-getLogger().debug("Ignoring file [{}] due to configuration", file.getName());
-} else if (file.isDirectory() == false) {
+if (file.isDirectory() == false) {
// validate all files and hold on to errors for a complete report if there are failures
getLogger().debug("Validating JSON [{}]", file.getName());
try {

@@ -19,14 +19,12 @@
package org.elasticsearch.gradle;

import java.util.ArrayList;
-import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
-import java.util.NoSuchElementException;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
@@ -89,7 +87,6 @@ public class BwcVersions {
private static final Pattern LINE_PATTERN = Pattern.compile(
"\\W+public static final Version V_(\\d+)_(\\d+)_(\\d+)(_alpha\\d+|_beta\\d+|_rc\\d+)? .*"
);
-private static final List<Version> IGNORED_VERSIONS = Arrays.asList(Version.fromString("7.7.0"));

private final Version currentVersion;
private final Map<Integer, List<Version>> groupByMajor;
@@ -123,7 +120,6 @@ protected BwcVersions(List<String> versionLines, Version currentVersionProperty)
Integer.parseInt(match.group(3))
)
)
-.filter(v -> !IGNORED_VERSIONS.contains(v)) // remove any specifically ignored versions
.collect(Collectors.toCollection(TreeSet::new)),
currentVersionProperty
);
@@ -276,16 +272,7 @@ public List<Version> getUnreleased() {
// we found that the previous minor is staged but not yet released
// in this case, the minor before that has a bugfix, should there be such a minor
if (greatestMinor >= 2) {
-int major = groupByMinor.values()
-.stream()
-.flatMap(Collection::stream)
-.findFirst()
-.map(Version::getMajor)
-.orElseThrow(NoSuchElementException::new);
-// Don't bother searching for a version we've ignored
-if (IGNORED_VERSIONS.contains(new Version(major, greatestMinor - 2, 0)) == false) {
-unreleased.add(getLatestVersionByKey(groupByMinor, greatestMinor - 2));
-}
+unreleased.add(getLatestVersionByKey(groupByMinor, greatestMinor - 2));
}
}
}

@@ -129,10 +129,10 @@
import org.elasticsearch.search.aggregations.bucket.range.RangeAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.sampler.InternalSampler;
import org.elasticsearch.search.aggregations.bucket.sampler.ParsedSampler;
-import org.elasticsearch.search.aggregations.bucket.significant.ParsedSignificantLongTerms;
-import org.elasticsearch.search.aggregations.bucket.significant.ParsedSignificantStringTerms;
-import org.elasticsearch.search.aggregations.bucket.significant.SignificantLongTerms;
-import org.elasticsearch.search.aggregations.bucket.significant.SignificantStringTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.ParsedSignificantLongTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.ParsedSignificantStringTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.SignificantLongTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.SignificantStringTerms;
import org.elasticsearch.search.aggregations.bucket.terms.DoubleTerms;
import org.elasticsearch.search.aggregations.bucket.terms.LongTerms;
import org.elasticsearch.search.aggregations.bucket.terms.ParsedDoubleTerms;

@@ -28,7 +28,7 @@
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregationBuilder;
-import org.elasticsearch.search.aggregations.bucket.significant.SignificantTerms;
+import org.elasticsearch.search.aggregations.bucket.terms.SignificantTerms;
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator;

import java.io.IOException;
8 changes: 4 additions & 4 deletions docs/reference/ilm/actions/ilm-allocate.asciidoc
@@ -7,16 +7,16 @@ Phases allowed: warm, cold.
Updates the index settings to change which nodes are allowed to host the index shards
and to change the number of replicas.

-The allocate action is not allowed in the hot phase.
-The initial allocation for the index must be done manually or via
-<<indices-templates, index templates>>.

You can configure this action to modify both the allocation rules and number of replicas,
only the allocation rules, or only the number of replicas.

For more information about how {es} uses replicas for scaling, see
<<scalability>>. See <<shard-allocation-filtering>> for more information about
controlling where {es} allocates shards of a particular index.

+NOTE: The allocate action is not allowed in the hot phase.
+The initial allocation for the index must be done manually or via index templates.
+{ilm-init} doesn't handle index allocation during the hot phase.
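
For illustration only, here is a minimal policy sketch that sets both the
allocation rules and the replica count in the warm phase. The policy name and
the `box_type` node attribute are assumptions, not part of this change:

[source,console]
--------------------------------------------------
PUT _ilm/policy/my_policy
{
  "policy": {
    "phases": {
      "warm": {
        "actions": {
          "allocate": {
            "number_of_replicas": 1,
            "require": {
              "box_type": "warm"
            }
          }
        }
      }
    }
  }
}
--------------------------------------------------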

[[ilm-allocate-options]]
==== Options
9 changes: 9 additions & 0 deletions docs/reference/ilm/ilm-index-lifecycle.asciidoc
@@ -36,6 +36,15 @@ the index must be older than the minimum age of the next phase.
The minimum age defaults to zero, which causes {ilm-init} to move indices to the next phase
as soon as all actions in the current phase complete.
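
As a sketch, a warm phase that indices enter only after seven days would set
`min_age` on the phase (the policy name, threshold, and shrink action here
are illustrative assumptions):

[source,console]
--------------------------------------------------
PUT _ilm/policy/my_policy
{
  "policy": {
    "phases": {
      "warm": {
        "min_age": "7d",
        "actions": {
          "shrink": {
            "number_of_shards": 1
          }
        }
      }
    }
  }
}
--------------------------------------------------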

+If an index has unallocated shards and the <<cluster-health,cluster health status>> is yellow,
+the index can still transition to the next phase according to its {ilm} policy.
+However, because {es} can only perform certain cleanup tasks on a green
+cluster, there might be unexpected side effects.
+
+To avoid increased disk usage and reliability issues,
+address any cluster health problems in a timely fashion.


[discrete]
[[ilm-phase-execution]]
=== Phase execution
13 changes: 7 additions & 6 deletions docs/reference/ilm/ilm-tutorial.asciidoc
@@ -10,8 +10,7 @@
This tutorial demonstrates how to use {ilm}
({ilm-init}) to manage indices that contain time-series data.

-When you continuously index timestamped documents into {es} using
-Filebeat, Logstash, or some other mechanism,
+When you continuously index timestamped documents into {es},
you typically use an index alias so you can periodically roll over to a new index.
This enables you to implement a hot-warm-cold architecture to meet your performance
requirements for your newest data, control costs over time, enforce retention policies,
@@ -28,10 +27,12 @@ as expected.

For an introduction to rolling indices, see <<index-rollover>>.

-NOTE: {filebeat} includes a default {ilm-init} policy that initiates the rollover action when
-the index size reaches 50GB or becomes 30 days old.
-You can use this policy as a starting point, or replace it with a custom policy.
-See {kibana-ref}/example-using-index-lifecycle-policy.html[Use {ilm-init} to manage Filebeat time-based indices].
+IMPORTANT: When you enable {ilm} for {beats} or the {ls} {es} output plugin,
+lifecycle policies are set up automatically.
+You do not need to bootstrap the initial index or take any other actions.
+You can modify the default policies through
+{kibana-ref}/example-using-index-lifecycle-policy.html[{kib} Management]
+or the {ilm-init} APIs.
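
For example, a default policy installed by a Beat can be inspected with the
{ilm-init} APIs before you overwrite it. The policy name `filebeat` is an
assumption; the exact name depends on the Beat and its version:

[source,console]
--------------------------------------------------
GET _ilm/policy/filebeat
--------------------------------------------------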


[discrete]
5 changes: 4 additions & 1 deletion docs/reference/ilm/index.asciidoc
@@ -13,6 +13,10 @@ For example, you could use {ilm-init} to:
* Create a new index each day, week, or month and archive previous ones
* Delete stale indices to enforce data retention standards

+When you enable {ilm} for {beats} or the {ls} {es} output plugin,
+{ilm-init} is configured automatically.
+You can modify the default policies through {kib} Management or the {ilm-init} APIs.
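
As an illustration of the rollover-plus-retention pattern listed above, a
policy might look like the following sketch (the name and thresholds are
illustrative, not part of this change):

[source,console]
--------------------------------------------------
PUT _ilm/policy/timeseries_policy
{
  "policy": {
    "phases": {
      "hot": {
        "actions": {
          "rollover": {
            "max_age": "1d"
          }
        }
      },
      "delete": {
        "min_age": "30d",
        "actions": {
          "delete": {}
        }
      }
    }
  }
}
--------------------------------------------------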

[TIP]
To automatically back up your indices and manage snapshots,
use <<getting-started-snapshot-lifecycle-management,snapshot lifecycle policies>>.
@@ -48,4 +52,3 @@ include::using-policies-rollover.asciidoc[]
include::ilm-with-existing-indices.asciidoc[]

include::ilm-and-snapshots.asciidoc[]

6 changes: 6 additions & 0 deletions docs/reference/ilm/set-up-lifecycle-policy.asciidoc
@@ -12,6 +12,12 @@ you create the policy and add it to the index template.
To use a policy to manage an index that doesn't roll over,
you can specify the policy directly when you create it.

+IMPORTANT: When you enable {ilm} for {beats} or the {ls} {es} output plugin,
+the necessary policies and configuration changes are applied automatically.
+You can modify the default policies, but you do not need to explicitly configure a policy or
+bootstrap an initial index.
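
For a rolling index, the policy is typically attached through an index
template, as in this sketch. The legacy template API shown matches the 7.x
docs of this commit; the names are illustrative assumptions:

[source,console]
--------------------------------------------------
PUT _template/timeseries_template
{
  "index_patterns": ["timeseries-*"],
  "settings": {
    "index.lifecycle.name": "timeseries_policy",
    "index.lifecycle.rollover_alias": "timeseries"
  }
}
--------------------------------------------------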


[discrete]
[[ilm-create-policy]]
=== Create lifecycle policy
12 changes: 4 additions & 8 deletions docs/reference/indices/analyze.asciidoc
@@ -44,7 +44,7 @@ the `analyzer` or `<field>` parameter overrides this value.
If no analyzer or field is specified,
the analyze API uses the default analyzer for the index.

If no index is specified
or the index does not have a default analyzer,
the analyze API uses the <<analysis-standard-analyzer,standard analyzer>>.
--
@@ -56,11 +56,9 @@ the analyze API uses the <<analysis-standard-analyzer,standard analyzer>>.
`analyzer`::
+
--
-(Optional, string or <<analysis-custom-analyzer,custom analyzer object>>)
-Analyzer used to analyze for the provided `text`.
-
-See <<analysis-analyzers>> for a list of built-in analyzers.
-You can also provide a <<analysis-custom-analyzer,custom analyzer>>.
+(Optional, string)
+The name of the analyzer that should be applied to the provided `text`. This could be a
+<<analysis-analyzers, built-in analyzer>>, or an analyzer that's been configured in the index.

If this parameter is not specified,
the analyze API uses the analyzer defined in the field's mapping.
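
A minimal request naming a built-in analyzer looks like this (example values
only):

[source,console]
--------------------------------------------------
GET /_analyze
{
  "analyzer" : "standard",
  "text" : "this is a test"
}
--------------------------------------------------
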
@@ -187,8 +185,6 @@ GET /_analyze
}
--------------------------------------------------

-deprecated[5.0.0, Use `filter`/`char_filter` instead of `filters`/`char_filters` and `token_filters` has been removed]

Custom tokenizers, token filters, and character filters can be specified in the request body as follows:

[source,console]

@@ -275,7 +275,7 @@ public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower
}
hi = Math.round(Math.floor(dValue));
}
-Query query = NumberFieldMapper.NumberType.LONG.rangeQuery(name(), lo, hi, true, true, hasDocValues());
+Query query = NumberFieldMapper.NumberType.LONG.rangeQuery(name(), lo, hi, true, true, hasDocValues(), context);
if (boost() != 1f) {
query = new BoostQuery(query, boost());
}

@@ -117,8 +117,8 @@ public void testRangeQuery() throws IOException {
Double u = randomBoolean() ? null : (randomDouble() * 2 - 1) * 10000;
boolean includeLower = randomBoolean();
boolean includeUpper = randomBoolean();
-Query doubleQ = NumberFieldMapper.NumberType.DOUBLE.rangeQuery("double", l, u, includeLower, includeUpper, false);
-Query scaledFloatQ = ft.rangeQuery(l, u, includeLower, includeUpper, null);
+Query doubleQ = NumberFieldMapper.NumberType.DOUBLE.rangeQuery("double", l, u, includeLower, includeUpper, false, MOCK_QSC);
+Query scaledFloatQ = ft.rangeQuery(l, u, includeLower, includeUpper, MOCK_QSC);
assertEquals(searcher.count(doubleQ), searcher.count(scaledFloatQ));
}
IOUtils.close(reader, dir);
@@ -128,37 +128,37 @@ public void testRoundsUpperBoundCorrectly() {
ScaledFloatFieldMapper.ScaledFloatFieldType ft = new ScaledFloatFieldMapper.ScaledFloatFieldType();
ft.setName("scaled_float");
ft.setScalingFactor(100.0);
-Query scaledFloatQ = ft.rangeQuery(null, 0.1, true, false, null);
+Query scaledFloatQ = ft.rangeQuery(null, 0.1, true, false, MOCK_QSC);
assertEquals("scaled_float:[-9223372036854775808 TO 9]", scaledFloatQ.toString());
-scaledFloatQ = ft.rangeQuery(null, 0.1, true, true, null);
+scaledFloatQ = ft.rangeQuery(null, 0.1, true, true, MOCK_QSC);
assertEquals("scaled_float:[-9223372036854775808 TO 10]", scaledFloatQ.toString());
-scaledFloatQ = ft.rangeQuery(null, 0.095, true, false, null);
+scaledFloatQ = ft.rangeQuery(null, 0.095, true, false, MOCK_QSC);
assertEquals("scaled_float:[-9223372036854775808 TO 9]", scaledFloatQ.toString());
-scaledFloatQ = ft.rangeQuery(null, 0.095, true, true, null);
+scaledFloatQ = ft.rangeQuery(null, 0.095, true, true, MOCK_QSC);
assertEquals("scaled_float:[-9223372036854775808 TO 9]", scaledFloatQ.toString());
-scaledFloatQ = ft.rangeQuery(null, 0.105, true, false, null);
+scaledFloatQ = ft.rangeQuery(null, 0.105, true, false, MOCK_QSC);
assertEquals("scaled_float:[-9223372036854775808 TO 10]", scaledFloatQ.toString());
-scaledFloatQ = ft.rangeQuery(null, 0.105, true, true, null);
+scaledFloatQ = ft.rangeQuery(null, 0.105, true, true, MOCK_QSC);
assertEquals("scaled_float:[-9223372036854775808 TO 10]", scaledFloatQ.toString());
-scaledFloatQ = ft.rangeQuery(null, 79.99, true, true, null);
+scaledFloatQ = ft.rangeQuery(null, 79.99, true, true, MOCK_QSC);
assertEquals("scaled_float:[-9223372036854775808 TO 7999]", scaledFloatQ.toString());
}

public void testRoundsLowerBoundCorrectly() {
ScaledFloatFieldMapper.ScaledFloatFieldType ft = new ScaledFloatFieldMapper.ScaledFloatFieldType();
ft.setName("scaled_float");
ft.setScalingFactor(100.0);
-Query scaledFloatQ = ft.rangeQuery(-0.1, null, false, true, null);
+Query scaledFloatQ = ft.rangeQuery(-0.1, null, false, true, MOCK_QSC);
assertEquals("scaled_float:[-9 TO 9223372036854775807]", scaledFloatQ.toString());
-scaledFloatQ = ft.rangeQuery(-0.1, null, true, true, null);
+scaledFloatQ = ft.rangeQuery(-0.1, null, true, true, MOCK_QSC);
assertEquals("scaled_float:[-10 TO 9223372036854775807]", scaledFloatQ.toString());
-scaledFloatQ = ft.rangeQuery(-0.095, null, false, true, null);
+scaledFloatQ = ft.rangeQuery(-0.095, null, false, true, MOCK_QSC);
assertEquals("scaled_float:[-9 TO 9223372036854775807]", scaledFloatQ.toString());
-scaledFloatQ = ft.rangeQuery(-0.095, null, true, true, null);
+scaledFloatQ = ft.rangeQuery(-0.095, null, true, true, MOCK_QSC);
assertEquals("scaled_float:[-9 TO 9223372036854775807]", scaledFloatQ.toString());
-scaledFloatQ = ft.rangeQuery(-0.105, null, false, true, null);
+scaledFloatQ = ft.rangeQuery(-0.105, null, false, true, MOCK_QSC);
assertEquals("scaled_float:[-10 TO 9223372036854775807]", scaledFloatQ.toString());
-scaledFloatQ = ft.rangeQuery(-0.105, null, true, true, null);
+scaledFloatQ = ft.rangeQuery(-0.105, null, true, true, MOCK_QSC);
assertEquals("scaled_float:[-10 TO 9223372036854775807]", scaledFloatQ.toString());
}
