Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix a DST error in date_histogram #52016

Merged
merged 6 commits into from
Feb 11, 2020
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -349,3 +349,33 @@ setup:
- match: { aggregations.histo.buckets.1.doc_count: 2 }
- match: { aggregations.histo.buckets.2.key_as_string: "2016-02-02T00:00:00.000Z" }
- match: { aggregations.histo.buckets.2.doc_count: 1 }

---
# Regression test: date_histogram with a fixed interval must bucket correctly
# for documents whose rounding crosses a daylight-savings transition that
# occurred before the epoch (America/Phoenix had DST only pre-1968, so a
# 2016 date resolves against a pre-epoch transition table).
"date_histogram with pre-epoch daylight savings time transition":
  - skip:
      version: " - 7.99.99"
      reason: bug fixed in 8.0.0. will be backported to 7.6.1

  # Index a single document carrying only a date field.
  - do:
      bulk:
        index: test_1
        refresh: true
        body:
          - '{"index": {}}'
          - '{"date": "2016-01-01"}'

  # Aggregate with a 1ms fixed interval in a zone with pre-epoch DST rules.
  - do:
      search:
        body:
          size: 0
          aggs:
            histo:
              date_histogram:
                field: date
                fixed_interval: 1ms
                time_zone: America/Phoenix

  # Exactly one bucket: midnight UTC rendered at the -07:00 Phoenix offset.
  - match: { hits.total.value: 1 }
  - length: { aggregations.histo.buckets: 1 }
  - match: { aggregations.histo.buckets.0.key_as_string: "2015-12-31T17:00:00.000-07:00" }
  - match: { aggregations.histo.buckets.0.doc_count: 1 }
Original file line number Diff line number Diff line change
Expand Up @@ -440,6 +440,12 @@ public Relation isFieldWithinQuery(IndexReader reader,
}
}

return isFieldWithinQuery(reader, fromInclusive, toInclusive, context);
}

@Override
public Relation isFieldWithinQuery(IndexReader reader, long fromInclusive, long toInclusive, QueryRewriteContext context)
throws IOException {
// This check needs to be done after fromInclusive and toInclusive
// are resolved so we can throw an exception if they are invalid
// even if there are no points in the shard
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -419,10 +419,27 @@ public enum Relation {
* {@link Relation#INTERSECTS}, which is always fine to return when there is
* no way to check whether values are actually within bounds. */
public Relation isFieldWithinQuery(
IndexReader reader,
Object from, Object to,
boolean includeLower, boolean includeUpper,
ZoneId timeZone, DateMathParser dateMathParser, QueryRewriteContext context) throws IOException {
IndexReader reader,
Object from, Object to,
boolean includeLower, boolean includeUpper,
ZoneId timeZone, DateMathParser dateMathParser, QueryRewriteContext context) throws IOException {
return Relation.INTERSECTS;
}

/**
* Return whether all values of the given {@link IndexReader} are within the range,
* outside the range or cross the range. The default implementation returns
* {@link Relation#INTERSECTS}, which is always fine to return when there is
* no way to check whether values are actually within bounds.
*
* Use this instead of
* {@link #isFieldWithinQuery(IndexReader, Object, Object, boolean, boolean, ZoneId, DateMathParser, QueryRewriteContext)}
* when you *know* the {@code fromInclusive} and {@code toInclusive} are always
longs.
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think you mean they're always longs?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

+1

*/
public Relation isFieldWithinQuery(
IndexReader reader,
long fromInclusive, long toInclusive, QueryRewriteContext context) throws IOException {
// Conservative default: INTERSECTS is always a safe answer when this field type
// has no way to prove the reader's values are WITHIN or DISJOINT from the range.
return Relation.INTERSECTS;
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -474,8 +474,7 @@ ZoneId rewriteTimeZone(QueryShardContext context) throws IOException {
// rounding rounds down, so 'nextTransition' is a good upper bound
final long high = nextTransition;

if (ft.isFieldWithinQuery(reader, low, high, true, false, ZoneOffset.UTC, EPOCH_MILLIS_PARSER,
context) == Relation.WITHIN) {
if (ft.isFieldWithinQuery(reader, low, high - 1, context) == Relation.WITHIN) {
// All values in this reader have the same offset despite daylight saving times.
// This is very common for location-based timezones such as Europe/Paris in
// combination with time-based indices.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -89,6 +89,7 @@ public void testIsFieldWithinQueryEmptyReader() throws IOException {
ft.setName("my_date");
assertEquals(Relation.DISJOINT, ft.isFieldWithinQuery(reader, "2015-10-12", "2016-04-03",
randomBoolean(), randomBoolean(), null, null, context));
assertEquals(Relation.DISJOINT, ft.isFieldWithinQuery(reader, parse("2015-10-12"), parse("2016-04-03"), context));
}

private void doTestIsFieldWithinQuery(DateFieldType ft, DirectoryReader reader,
Expand Down Expand Up @@ -119,31 +120,43 @@ private void doTestIsFieldWithinQuery(DateFieldType ft, DirectoryReader reader,
public void testIsFieldWithinQuery() throws IOException {
Directory dir = newDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null));
long instant1 =
DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse("2015-10-12")).toInstant().toEpochMilli();
long instant2 =
DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse("2016-04-03")).toInstant().toEpochMilli();
Document doc = new Document();
LongPoint field = new LongPoint("my_date", instant1);
LongPoint field = new LongPoint("my_date", parse("2015-10-12"));
doc.add(field);
w.addDocument(doc);
field.setLongValue(instant2);
field.setLongValue(parse("2016-04-03"));
w.addDocument(doc);
DirectoryReader reader = DirectoryReader.open(w);
DateFieldType ft = new DateFieldType();
ft.setName("my_date");

// Test the parsing version of isFieldWithinQuery which is mostly used by range queries
DateMathParser alternateFormat = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.toDateMathParser();
doTestIsFieldWithinQuery(ft, reader, null, null);
doTestIsFieldWithinQuery(ft, reader, null, alternateFormat);
doTestIsFieldWithinQuery(ft, reader, DateTimeZone.UTC, null);
doTestIsFieldWithinQuery(ft, reader, DateTimeZone.UTC, alternateFormat);

// Test the "direct from long" version of isFieldWithinQuery which is used to optimize time zones
QueryRewriteContext context = new QueryRewriteContext(xContentRegistry(), writableRegistry(), null, () -> nowInMillis);
assertEquals(Relation.INTERSECTS, ft.isFieldWithinQuery(reader, parse("2015-10-09"), parse("2016-01-02"), context));
assertEquals(Relation.INTERSECTS, ft.isFieldWithinQuery(reader, parse("2016-01-02"), parse("2016-06-20"), context));
assertEquals(Relation.INTERSECTS, ft.isFieldWithinQuery(reader, parse("2016-01-02"), parse("2016-02-12"), context));
assertEquals(Relation.DISJOINT, ft.isFieldWithinQuery(reader, parse("2014-01-02"), parse("2015-02-12"), context));
assertEquals(Relation.DISJOINT, ft.isFieldWithinQuery(reader, parse("2016-05-11"), parse("2016-08-30"), context));
assertEquals(Relation.WITHIN, ft.isFieldWithinQuery(reader, parse("2015-09-25"), parse("2016-05-29"), context));
assertEquals(Relation.WITHIN, ft.isFieldWithinQuery(reader, parse("2015-10-12"), parse("2016-04-03"), context));
assertEquals(Relation.INTERSECTS, ft.isFieldWithinQuery(reader, parse("2015-10-12") + 1, parse("2016-04-03") - 1, context));
assertEquals(Relation.INTERSECTS, ft.isFieldWithinQuery(reader, parse("2015-10-12") + 1, parse("2016-04-03"), context));
assertEquals(Relation.INTERSECTS, ft.isFieldWithinQuery(reader, parse("2015-10-12"), parse("2016-04-03") - 1, context));


// Fields with no value indexed.
DateFieldType ft2 = new DateFieldType();
ft2.setName("my_date2");

QueryRewriteContext context = new QueryRewriteContext(xContentRegistry(), writableRegistry(), null, () -> nowInMillis);
assertEquals(Relation.DISJOINT, ft2.isFieldWithinQuery(reader, "2015-10-09", "2016-01-02", false, false, null, null, context));
assertEquals(Relation.DISJOINT, ft2.isFieldWithinQuery(reader, parse("2015-10-09"), parse("2016-01-02"), context));
IOUtils.close(reader, w, dir);
}

Expand Down Expand Up @@ -251,4 +264,8 @@ public void testDateNanoDocValues() throws IOException {
w.close();
dir.close();
}

/**
 * Parses {@code str} with the default date-time formatter and returns the
 * resulting instant as epoch milliseconds. Test helper so assertions can
 * feed the long-based {@code isFieldWithinQuery} overload directly.
 */
private long parse(String str) {
return DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(str)).toInstant().toEpochMilli();
}
}