From e9fcb25a2876594cbadbc6f229e524e3dde8875d Mon Sep 17 00:00:00 2001
From: Adrien Grand
Date: Tue, 22 Jan 2019 11:44:29 +0100
Subject: [PATCH 01/39] Upgrade to lucene-8.0.0-snapshot-83f9835. (#37668)

This snapshot uses a new file format for doc-values which is expected to
make advance/advanceExact perform faster on sparse fields:
https://issues.apache.org/jira/browse/LUCENE-8585

---
 buildSrc/version.properties | 2 +-
 .../lucene-expressions-8.0.0-snapshot-83f9835.jar.sha1 | 1 +
 .../lucene-expressions-8.0.0-snapshot-a1c6e642aa.jar.sha1 | 1 -
 .../lucene-analyzers-icu-8.0.0-snapshot-83f9835.jar.sha1 | 1 +
 .../lucene-analyzers-icu-8.0.0-snapshot-a1c6e642aa.jar.sha1 | 1 -
 ...lucene-analyzers-kuromoji-8.0.0-snapshot-83f9835.jar.sha1 | 1 +
 ...ene-analyzers-kuromoji-8.0.0-snapshot-a1c6e642aa.jar.sha1 | 1 -
 .../lucene-analyzers-nori-8.0.0-snapshot-83f9835.jar.sha1 | 1 +
 .../lucene-analyzers-nori-8.0.0-snapshot-a1c6e642aa.jar.sha1 | 1 -
 ...lucene-analyzers-phonetic-8.0.0-snapshot-83f9835.jar.sha1 | 1 +
 ...ene-analyzers-phonetic-8.0.0-snapshot-a1c6e642aa.jar.sha1 | 1 -
 .../lucene-analyzers-smartcn-8.0.0-snapshot-83f9835.jar.sha1 | 1 +
 ...cene-analyzers-smartcn-8.0.0-snapshot-a1c6e642aa.jar.sha1 | 1 -
 .../lucene-analyzers-stempel-8.0.0-snapshot-83f9835.jar.sha1 | 1 +
 ...cene-analyzers-stempel-8.0.0-snapshot-a1c6e642aa.jar.sha1 | 1 -
 ...cene-analyzers-morfologik-8.0.0-snapshot-83f9835.jar.sha1 | 1 +
 ...e-analyzers-morfologik-8.0.0-snapshot-a1c6e642aa.jar.sha1 | 1 -
 .../lucene-analyzers-common-8.0.0-snapshot-83f9835.jar.sha1 | 1 +
 ...ucene-analyzers-common-8.0.0-snapshot-a1c6e642aa.jar.sha1 | 1 -
 .../lucene-backward-codecs-8.0.0-snapshot-83f9835.jar.sha1 | 1 +
 ...lucene-backward-codecs-8.0.0-snapshot-a1c6e642aa.jar.sha1 | 1 -
 server/licenses/lucene-core-8.0.0-snapshot-83f9835.jar.sha1 | 1 +
 .../licenses/lucene-core-8.0.0-snapshot-a1c6e642aa.jar.sha1 | 1 -
 .../licenses/lucene-grouping-8.0.0-snapshot-83f9835.jar.sha1 | 1 +
 .../lucene-grouping-8.0.0-snapshot-a1c6e642aa.jar.sha1 | 1 -
 .../lucene-highlighter-8.0.0-snapshot-83f9835.jar.sha1 | 1 +
 .../lucene-highlighter-8.0.0-snapshot-a1c6e642aa.jar.sha1 | 1 -
 server/licenses/lucene-join-8.0.0-snapshot-83f9835.jar.sha1 | 1 +
 .../licenses/lucene-join-8.0.0-snapshot-a1c6e642aa.jar.sha1 | 1 -
 .../licenses/lucene-memory-8.0.0-snapshot-83f9835.jar.sha1 | 1 +
 .../lucene-memory-8.0.0-snapshot-a1c6e642aa.jar.sha1 | 1 -
 server/licenses/lucene-misc-8.0.0-snapshot-83f9835.jar.sha1 | 1 +
 .../licenses/lucene-misc-8.0.0-snapshot-a1c6e642aa.jar.sha1 | 1 -
 .../licenses/lucene-queries-8.0.0-snapshot-83f9835.jar.sha1 | 1 +
 .../lucene-queries-8.0.0-snapshot-a1c6e642aa.jar.sha1 | 1 -
 .../lucene-queryparser-8.0.0-snapshot-83f9835.jar.sha1 | 1 +
 .../lucene-queryparser-8.0.0-snapshot-a1c6e642aa.jar.sha1 | 1 -
 .../licenses/lucene-sandbox-8.0.0-snapshot-83f9835.jar.sha1 | 1 +
 .../lucene-sandbox-8.0.0-snapshot-a1c6e642aa.jar.sha1 | 1 -
 .../licenses/lucene-spatial-8.0.0-snapshot-83f9835.jar.sha1 | 1 +
 .../lucene-spatial-8.0.0-snapshot-a1c6e642aa.jar.sha1 | 1 -
 .../lucene-spatial-extras-8.0.0-snapshot-83f9835.jar.sha1 | 1 +
 .../lucene-spatial-extras-8.0.0-snapshot-a1c6e642aa.jar.sha1 | 1 -
 .../lucene-spatial3d-8.0.0-snapshot-83f9835.jar.sha1 | 1 +
 .../lucene-spatial3d-8.0.0-snapshot-a1c6e642aa.jar.sha1 | 1 -
 .../licenses/lucene-suggest-8.0.0-snapshot-83f9835.jar.sha1 | 1 +
 .../lucene-suggest-8.0.0-snapshot-a1c6e642aa.jar.sha1 | 1 -
 .../java/org/elasticsearch/index/query/IntervalBuilder.java | 5 +++++
 .../licenses/lucene-core-8.0.0-snapshot-83f9835.jar.sha1 | 1 +
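The advance/advanceExact calls named in the message above are how consumers
step through a sparse doc-values field. A minimal sketch of such a consumer,
assuming a Lucene 8.x snapshot on the classpath; the field name, method name,
and default-value handling are illustrative and not part of this patch:

import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.NumericDocValues;

import java.io.IOException;

final class SparseDocValuesExample {
    // Reads the value of the (illustrative) "price" field for docId, or returns
    // defaultValue when the document has no value -- the common case on sparse fields.
    static long priceOrDefault(LeafReader reader, int docId, long defaultValue) throws IOException {
        NumericDocValues values = reader.getNumericDocValues("price");
        if (values == null) {
            return defaultValue; // no document in this segment has the field
        }
        // advanceExact seeks forward to docId and reports whether a value exists
        // there; LUCENE-8585 changes the on-disk doc-values encoding so that this
        // forward seek is cheaper when most documents lack the field.
        if (values.advanceExact(docId)) {
            return values.longValue();
        }
        return defaultValue;
    }
}

Like advance(), advanceExact must be called with strictly increasing target
doc IDs on a given iterator, which is why doc-values consumers process each
segment's documents in order.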
.../licenses/lucene-core-8.0.0-snapshot-a1c6e642aa.jar.sha1 | 1 - 50 files changed, 30 insertions(+), 25 deletions(-) create mode 100644 modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-83f9835.jar.sha1 delete mode 100644 modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-a1c6e642aa.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-83f9835.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-a1c6e642aa.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-83f9835.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-a1c6e642aa.jar.sha1 create mode 100644 plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-83f9835.jar.sha1 delete mode 100644 plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-a1c6e642aa.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-83f9835.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-a1c6e642aa.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-83f9835.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-a1c6e642aa.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-83f9835.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-a1c6e642aa.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-83f9835.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-a1c6e642aa.jar.sha1 create mode 100644 server/licenses/lucene-analyzers-common-8.0.0-snapshot-83f9835.jar.sha1 delete mode 100644 server/licenses/lucene-analyzers-common-8.0.0-snapshot-a1c6e642aa.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-8.0.0-snapshot-83f9835.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-8.0.0-snapshot-a1c6e642aa.jar.sha1 create mode 100644 server/licenses/lucene-core-8.0.0-snapshot-83f9835.jar.sha1 delete mode 100644 server/licenses/lucene-core-8.0.0-snapshot-a1c6e642aa.jar.sha1 create mode 100644 server/licenses/lucene-grouping-8.0.0-snapshot-83f9835.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-8.0.0-snapshot-a1c6e642aa.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-8.0.0-snapshot-83f9835.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-8.0.0-snapshot-a1c6e642aa.jar.sha1 create mode 100644 server/licenses/lucene-join-8.0.0-snapshot-83f9835.jar.sha1 delete mode 100644 server/licenses/lucene-join-8.0.0-snapshot-a1c6e642aa.jar.sha1 create mode 100644 server/licenses/lucene-memory-8.0.0-snapshot-83f9835.jar.sha1 delete mode 100644 server/licenses/lucene-memory-8.0.0-snapshot-a1c6e642aa.jar.sha1 create mode 100644 server/licenses/lucene-misc-8.0.0-snapshot-83f9835.jar.sha1 delete mode 100644 server/licenses/lucene-misc-8.0.0-snapshot-a1c6e642aa.jar.sha1 create mode 100644 server/licenses/lucene-queries-8.0.0-snapshot-83f9835.jar.sha1 delete mode 100644 server/licenses/lucene-queries-8.0.0-snapshot-a1c6e642aa.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-8.0.0-snapshot-83f9835.jar.sha1 delete mode 100644 
server/licenses/lucene-queryparser-8.0.0-snapshot-a1c6e642aa.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-8.0.0-snapshot-83f9835.jar.sha1 delete mode 100644 server/licenses/lucene-sandbox-8.0.0-snapshot-a1c6e642aa.jar.sha1 create mode 100644 server/licenses/lucene-spatial-8.0.0-snapshot-83f9835.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-8.0.0-snapshot-a1c6e642aa.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-8.0.0-snapshot-83f9835.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-8.0.0-snapshot-a1c6e642aa.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-8.0.0-snapshot-83f9835.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-8.0.0-snapshot-a1c6e642aa.jar.sha1 create mode 100644 server/licenses/lucene-suggest-8.0.0-snapshot-83f9835.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-8.0.0-snapshot-a1c6e642aa.jar.sha1 create mode 100644 x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-83f9835.jar.sha1 delete mode 100644 x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-a1c6e642aa.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 40380af0823b6..778f29686ad67 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ elasticsearch = 7.0.0 -lucene = 8.0.0-snapshot-a1c6e642aa +lucene = 8.0.0-snapshot-83f9835 # optional dependencies spatial4j = 0.7 diff --git a/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-83f9835.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..fa128372b2467 --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +c84431b751d851f484f2799f6dcb9110113b1958 \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 6f055c7ac5eda..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -825a0eb2b9ff58df36753971217cba80466d7132 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-83f9835.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..927a881df73f1 --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +ea440f53a9e858c2ed87927c63d57eb70e5af9ee \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 4d7a031054064..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analyzers-icu-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5b0beeaa95c28e5e0679d684d4e2e30e90cf53e7 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-83f9835.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..075bd24f3e609 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-83f9835.jar.sha1 
@@ -0,0 +1 @@ +b016fdfb8f1ac413d902cd4d244b295c9f01e610 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 0c588e1607a0d..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analyzers-kuromoji-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fff58bb761b71ded4bf1bfd41bad522df5c67f5c \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-83f9835.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..d5046efc95935 --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +395c8291751ffa7fbbb96faf578408a33a34ad1d \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 78a9e33ed22e1..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analyzers-nori-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -167a2b06379c4a1233a866ea88df17bb481e7200 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-83f9835.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..19a0a9a198ce0 --- /dev/null +++ b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +6957c71604356a1fbf0e13794595e4ea42126dd7 \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 1ea10265dcfef..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analyzers-phonetic-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -77a1ed8e4544c31b7c949c9f0ddb7bc2a53adfb9 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-83f9835.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..91597e3941158 --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +09f42235a8daca4ca8ea604314c2eb9de51b9e98 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 0e69798085eb1..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analyzers-smartcn-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -49b825fc84de3f993eb161d1a38fdeefa9b5511a \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-83f9835.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..a868f7d5a053f --- /dev/null +++ 
b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +7cf7eeb685a2060e97f50551236cfcaf39990083 \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 85d83e7674670..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analyzers-stempel-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6c60a0b708cd1c61ec34191df1bcf99b9211c08f \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-83f9835.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..4f5f80aa96b91 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +ff163fb06ec3e47d501b290d8f69cca88a0341cc \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index eab26671dcaa3..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analyzers-morfologik-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -553be14e6c3bb82b7e70f76ce2d294e4fa26fc20 \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-analyzers-common-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..4dc7f4b4ca6bf --- /dev/null +++ b/server/licenses/lucene-analyzers-common-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +f4d286ed7940fc206331424e8ce577584208eba3 \ No newline at end of file diff --git a/server/licenses/lucene-analyzers-common-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-analyzers-common-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index be6bfec6e5563..0000000000000 --- a/server/licenses/lucene-analyzers-common-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -66e4d1a3f91be88d903b1c75e71c8b15a7dc4f90 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-backward-codecs-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..df0783e11fee6 --- /dev/null +++ b/server/licenses/lucene-backward-codecs-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +e5416da7370f5def9a79fb1cccb091b091b808a5 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-backward-codecs-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 357ce92760e39..0000000000000 --- a/server/licenses/lucene-backward-codecs-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -069cfb0c693d365cbf332973ab796ba33646f867 \ No newline at end of file diff --git a/server/licenses/lucene-core-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-core-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..7a4b68f8d857d --- /dev/null +++ b/server/licenses/lucene-core-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +e3a95ff3cbd96e2c05b90932c20ca6374cdcdbe9 \ No newline at end of file diff --git 
a/server/licenses/lucene-core-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-core-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index efbb9ada534a5..0000000000000 --- a/server/licenses/lucene-core-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ad32720fe677becb93a26692338b63754613aa50 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-grouping-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..e1933653b9f4d --- /dev/null +++ b/server/licenses/lucene-grouping-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +045dcae6368a436cec6e56f654a9c6b3a4656d17 \ No newline at end of file diff --git a/server/licenses/lucene-grouping-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-grouping-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 7f35ad20b8230..0000000000000 --- a/server/licenses/lucene-grouping-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3f9499ffc5e956f7a113308198e74e80b7df290b \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-highlighter-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..85aff2f06dd49 --- /dev/null +++ b/server/licenses/lucene-highlighter-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +63b7164d24547cea36df3d81eedb9e8b424cf3c2 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-highlighter-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 566114f72948f..0000000000000 --- a/server/licenses/lucene-highlighter-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -162cac09191f267039cdb73ccc7face5ef54ba8b \ No newline at end of file diff --git a/server/licenses/lucene-join-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-join-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..41b3910da1319 --- /dev/null +++ b/server/licenses/lucene-join-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +7b3358889c491237709e8aea4a39e816227f3d26 \ No newline at end of file diff --git a/server/licenses/lucene-join-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-join-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index baa09c01bed13..0000000000000 --- a/server/licenses/lucene-join-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -106cbbc96feb7413b17539f8a0ae2e7692550f44 \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-memory-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..22318a34bf09e --- /dev/null +++ b/server/licenses/lucene-memory-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +5d9566e2b3be41c81fe42df1c57fff31fcdbc565 \ No newline at end of file diff --git a/server/licenses/lucene-memory-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-memory-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index ba3d8ec94d0ac..0000000000000 --- a/server/licenses/lucene-memory-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -37336dec582ce1569e944d1d8a5181c2eb2aec25 \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-misc-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..2c4429c8c7619 --- /dev/null +++ 
b/server/licenses/lucene-misc-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +45574e22c4b1569a2aecd5b2837388c7287892b5 \ No newline at end of file diff --git a/server/licenses/lucene-misc-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-misc-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 764fffadffe99..0000000000000 --- a/server/licenses/lucene-misc-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c18617a95c109160d0dacb58a1e268014f7f5862 \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-queries-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..ae11e5858a28c --- /dev/null +++ b/server/licenses/lucene-queries-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +67405660330db8c09e0994a615cf3ab95d7bc151 \ No newline at end of file diff --git a/server/licenses/lucene-queries-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-queries-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index e50a454a0ebd9..0000000000000 --- a/server/licenses/lucene-queries-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ab86036efd74fc41730e20dd2d3de977297878e0 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-queryparser-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..fcb52584b667c --- /dev/null +++ b/server/licenses/lucene-queryparser-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +c8de64bf2c4f09766d4cd62e8dd403016665f37c \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-queryparser-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 8b2d290d647ed..0000000000000 --- a/server/licenses/lucene-queryparser-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -02f3f472494f250da6fe7199de6c2f2ef5972774 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-sandbox-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..80750b2d41e33 --- /dev/null +++ b/server/licenses/lucene-sandbox-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +fc080b45b881a78a23743df9a7388fd3dbbb9d66 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-sandbox-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 2466df430847f..0000000000000 --- a/server/licenses/lucene-sandbox-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -da0a248a2bb69499715411b682d45adaea5ab499 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-spatial-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..32f5ca7196e17 --- /dev/null +++ b/server/licenses/lucene-spatial-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +6e0e12b9882005b87ef39325e1fc8539c8caff31 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-spatial-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index c8b8762f25a92..0000000000000 --- a/server/licenses/lucene-spatial-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8ed0aad4c4214d0fe3571dfa2d09c936a91cf3c7 \ No newline at end of file diff --git 
a/server/licenses/lucene-spatial-extras-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-spatial-extras-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..2cf474908d1ba --- /dev/null +++ b/server/licenses/lucene-spatial-extras-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +7ed9c9a03e1a15840237d09ca4da7aadfce1c780 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-spatial-extras-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 17b2f3ef4a33c..0000000000000 --- a/server/licenses/lucene-spatial-extras-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -9105a1c73feeb836ca4244367f02d6a8d7e3cc27 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-spatial3d-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..c4016fb692e2f --- /dev/null +++ b/server/licenses/lucene-spatial3d-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +fdf0efdeb9a73a6b0f8349df21049ecbc73955d7 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-spatial3d-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 62d3f24344e33..0000000000000 --- a/server/licenses/lucene-spatial3d-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6ebc95227d4415cc6d345c1dd3759e1f348e0ca4 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.0.0-snapshot-83f9835.jar.sha1 b/server/licenses/lucene-suggest-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..7ea28b0ed87da --- /dev/null +++ b/server/licenses/lucene-suggest-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +c69399183d5f9f85f3f8130452d0bed62fd92440 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/server/licenses/lucene-suggest-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index 8471aff350b83..0000000000000 --- a/server/licenses/lucene-suggest-8.0.0-snapshot-a1c6e642aa.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c4cf1b911521f962c9bd2d28efb519bf9e1b88f4 \ No newline at end of file diff --git a/server/src/main/java/org/elasticsearch/index/query/IntervalBuilder.java b/server/src/main/java/org/elasticsearch/index/query/IntervalBuilder.java index 7f42eb137190d..b39f2ab5a91e6 100644 --- a/server/src/main/java/org/elasticsearch/index/query/IntervalBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/IntervalBuilder.java @@ -275,6 +275,11 @@ public MatchesIterator matches(String field, LeafReaderContext ctx, int doc) { return null; } + @Override + public int minExtent() { + return 0; + } + @Override public void extractTerms(String field, Set terms) { diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-83f9835.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-83f9835.jar.sha1 new file mode 100644 index 0000000000000..7a4b68f8d857d --- /dev/null +++ b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-83f9835.jar.sha1 @@ -0,0 +1 @@ +e3a95ff3cbd96e2c05b90932c20ca6374cdcdbe9 \ No newline at end of file diff --git a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-a1c6e642aa.jar.sha1 b/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-a1c6e642aa.jar.sha1 deleted file mode 100644 index efbb9ada534a5..0000000000000 --- 
a/x-pack/plugin/sql/sql-action/licenses/lucene-core-8.0.0-snapshot-a1c6e642aa.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-ad32720fe677becb93a26692338b63754613aa50
\ No newline at end of file

From 5db7ed22a0aab6e630a89952ec94327d205fc3a3 Mon Sep 17 00:00:00 2001
From: David Turner
Date: Tue, 22 Jan 2019 11:03:51 +0000
Subject: [PATCH 02/39] Bootstrap a Zen2 cluster once quorum is discovered
 (#37463)

Today, when bootstrapping a Zen2 cluster, we wait for every node in the
`initial_master_nodes` setting to be discovered, so that we can map the node
names or addresses in the `initial_master_nodes` list to their IDs for
inclusion in the initial voting configuration. This means that if any of the
expected master-eligible nodes fails to start then bootstrapping will not
occur and the cluster will not form. This is not ideal: we would prefer the
cluster to bootstrap even if some of the master-eligible nodes do not start.

Safe bootstrapping requires that all pairs of quorums of all initial
configurations overlap, and this is particularly troublesome to ensure given
that nodes may be concurrently and independently attempting to bootstrap the
cluster. The solution is to bootstrap using an initial configuration whose
size matches the size of the expected set of master-eligible nodes, but with
the unknown IDs replaced by "placeholder" IDs that can never belong to any
node. Any quorum of received votes in any of these placeholder-laden initial
configurations is also a quorum of the "true" initial set of master-eligible
nodes, which guarantees that it intersects all other quorums as required.

Note that this change means that the initial configuration is not necessarily
robust to node failures. Normally the cluster will form and then
auto-reconfigure to a more robust configuration in which the placeholder IDs
are replaced by the IDs of genuine nodes as they join the cluster; however,
if a node fails between bootstrapping and this auto-reconfiguration then the
cluster may become unavailable. We believe this is less likely than a node
failing to start at all.

This commit also enormously simplifies the cluster bootstrapping process,
which today involves two (local) transport actions in order to support a
flexible bootstrapping API and to make it easily accessible to plugins.
This flexibility is not required by the current design, so it adds a good
deal of unnecessary complexity. Here we remove this complexity in favour of
a much simpler ClusterBootstrapService implementation that does all the work
itself.
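The key invariant above — any quorum of real votes in a placeholder-padded
configuration is also a majority of the expected set of master-eligible
nodes — can be sketched as follows. This is an illustrative model, not the
actual ClusterBootstrapService code; the placeholder prefix and method names
are assumptions:

import java.util.HashSet;
import java.util.List;
import java.util.Set;

final class PlaceholderBootstrapSketch {
    // IDs with this prefix can never belong to a real node (assumed prefix).
    private static final String PLACEHOLDER_PREFIX = "{bootstrap-placeholder}-";

    // expectedMasterNodes: the size of the initial_master_nodes list;
    // discoveredIds: the master-eligible node IDs discovered so far.
    static Set<String> initialConfiguration(int expectedMasterNodes, List<String> discoveredIds) {
        Set<String> config = new HashSet<>(discoveredIds);
        // Pad with placeholders so the configuration size always matches the
        // expected set. A majority of this padded configuration requires more
        // than expectedMasterNodes / 2 real votes, because placeholders never
        // vote, so it is also a majority of the expected set.
        for (int i = config.size(); i < expectedMasterNodes; i++) {
            config.add(PLACEHOLDER_PREFIX + i);
        }
        return config;
    }
}

Since any two majorities of the same expected set must share at least one
genuine node, any two configurations built this way — even by nodes
bootstrapping concurrently and independently — have overlapping quorums,
which is the safety property the message above relies on.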
---
 .../elasticsearch/ElasticsearchException.java | 4 +-
 .../elasticsearch/action/ActionModule.java | 6 -
 .../bootstrap/BootstrapClusterAction.java | 41 --
 .../bootstrap/BootstrapClusterRequest.java | 65 ---
 .../bootstrap/BootstrapClusterResponse.java | 66 ---
 .../bootstrap/BootstrapConfiguration.java | 179 ------
 .../bootstrap/GetDiscoveredNodesAction.java | 41 --
 .../bootstrap/GetDiscoveredNodesRequest.java | 119 ----
 .../bootstrap/GetDiscoveredNodesResponse.java | 73 ---
 .../TransportBootstrapClusterAction.java | 87 ---
 .../TransportGetDiscoveredNodesAction.java | 178 ------
 .../ClusterAlreadyBootstrappedException.java | 38 --
 .../coordination/ClusterBootstrapService.java | 295 +++++-----
 .../ClusterFormationFailureHelper.java | 16 +-
 .../cluster/coordination/Coordinator.java | 56 +-
 .../ExceptionSerializationTests.java | 4 +-
 .../BootstrapClusterRequestTests.java | 39 --
 .../BootstrapClusterResponseTests.java | 33 --
 .../BootstrapConfigurationTests.java | 182 ------
 .../GetDiscoveredNodesRequestTests.java | 64 ---
 .../GetDiscoveredNodesResponseTests.java | 59 --
 .../TransportBootstrapClusterActionTests.java | 233 --------
 ...ransportGetDiscoveredNodesActionTests.java | 533 ------------------
 .../ClusterBootstrapServiceTests.java | 473 ++++++++++++----
 .../ClusterFormationFailureHelperTests.java | 22 +
 .../coordination/CoordinatorTests.java | 91 +--
 .../snapshots/SnapshotsServiceTests.java | 14 +-
 27 files changed, 563 insertions(+), 2448 deletions(-)
 delete mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterAction.java
 delete mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterRequest.java
 delete mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterResponse.java
 delete mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapConfiguration.java
 delete mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesAction.java
 delete mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesRequest.java
 delete mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesResponse.java
 delete mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportBootstrapClusterAction.java
 delete mode 100644 server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportGetDiscoveredNodesAction.java
 delete mode 100644 server/src/main/java/org/elasticsearch/cluster/coordination/ClusterAlreadyBootstrappedException.java
 delete mode 100644 server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterRequestTests.java
 delete mode 100644 server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterResponseTests.java
 delete mode 100644 server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapConfigurationTests.java
 delete mode 100644 server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesRequestTests.java
 delete mode 100644 server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesResponseTests.java
 delete mode 100644 server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportBootstrapClusterActionTests.java
 delete mode 100644 server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportGetDiscoveredNodesActionTests.java

diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java
index 530c5ce4f6396..1fc5ef474ee04 100644
--- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java
+++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java
@@ -1009,10 +1009,8 @@ private enum ElasticsearchExceptionHandle {
             MultiBucketConsumerService.TooManyBucketsException::new, 149, Version.V_7_0_0),
         COORDINATION_STATE_REJECTED_EXCEPTION(org.elasticsearch.cluster.coordination.CoordinationStateRejectedException.class,
             org.elasticsearch.cluster.coordination.CoordinationStateRejectedException::new, 150, Version.V_7_0_0),
-        CLUSTER_ALREADY_BOOTSTRAPPED_EXCEPTION(org.elasticsearch.cluster.coordination.ClusterAlreadyBootstrappedException.class,
-            org.elasticsearch.cluster.coordination.ClusterAlreadyBootstrappedException::new, 151, Version.V_7_0_0),
         SNAPSHOT_IN_PROGRESS_EXCEPTION(org.elasticsearch.snapshots.SnapshotInProgressException.class,
-            org.elasticsearch.snapshots.SnapshotInProgressException::new, 152, Version.V_7_0_0);
+            org.elasticsearch.snapshots.SnapshotInProgressException::new, 151, Version.V_7_0_0);
 
     final Class exceptionClass;
     final CheckedFunction constructor;

diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java
index a3d3c615162dc..142aa6bde74b6 100644
--- a/server/src/main/java/org/elasticsearch/action/ActionModule.java
+++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java
@@ -23,10 +23,6 @@
 import org.apache.logging.log4j.LogManager;
 import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainAction;
 import org.elasticsearch.action.admin.cluster.allocation.TransportClusterAllocationExplainAction;
-import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapClusterAction;
-import org.elasticsearch.action.admin.cluster.bootstrap.GetDiscoveredNodesAction;
-import org.elasticsearch.action.admin.cluster.bootstrap.TransportBootstrapClusterAction;
-import org.elasticsearch.action.admin.cluster.bootstrap.TransportGetDiscoveredNodesAction;
 import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsAction;
 import org.elasticsearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsAction;
 import org.elasticsearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction;
@@ -433,8 +429,6 @@ public void reg
         actions.register(GetTaskAction.INSTANCE, TransportGetTaskAction.class);
         actions.register(CancelTasksAction.INSTANCE, TransportCancelTasksAction.class);
 
-        actions.register(GetDiscoveredNodesAction.INSTANCE, TransportGetDiscoveredNodesAction.class);
-        actions.register(BootstrapClusterAction.INSTANCE, TransportBootstrapClusterAction.class);
         actions.register(AddVotingConfigExclusionsAction.INSTANCE, TransportAddVotingConfigExclusionsAction.class);
         actions.register(ClearVotingConfigExclusionsAction.INSTANCE, TransportClearVotingConfigExclusionsAction.class);
         actions.register(ClusterAllocationExplainAction.INSTANCE, TransportClusterAllocationExplainAction.class);

diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterAction.java
deleted file mode 100644
index
28a8e580cedc4..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterAction.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.action.Action; -import org.elasticsearch.common.io.stream.Writeable.Reader; - -public class BootstrapClusterAction extends Action { - public static final BootstrapClusterAction INSTANCE = new BootstrapClusterAction(); - public static final String NAME = "cluster:admin/bootstrap/set_voting_config"; - - private BootstrapClusterAction() { - super(NAME); - } - - @Override - public BootstrapClusterResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - - @Override - public Reader getResponseReader() { - return BootstrapClusterResponse::new; - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterRequest.java deleted file mode 100644 index f8d0bcb13a58f..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterRequest.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; - -/** - * Request to set the initial configuration of master-eligible nodes in a cluster so that the very first master election can take place. 
- */ -public class BootstrapClusterRequest extends ActionRequest { - private final BootstrapConfiguration bootstrapConfiguration; - - public BootstrapClusterRequest(BootstrapConfiguration bootstrapConfiguration) { - this.bootstrapConfiguration = bootstrapConfiguration; - } - - public BootstrapClusterRequest(StreamInput in) throws IOException { - super(in); - bootstrapConfiguration = new BootstrapConfiguration(in); - } - - /** - * @return the bootstrap configuration: the initial set of master-eligible nodes whose votes are counted in elections. - */ - public BootstrapConfiguration getBootstrapConfiguration() { - return bootstrapConfiguration; - } - - @Override - public ActionRequestValidationException validate() { - return null; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - bootstrapConfiguration.writeTo(out); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterResponse.java deleted file mode 100644 index 2576409a3cef1..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterResponse.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; - -/** - * Response to a {@link BootstrapClusterRequest} indicating that the cluster has been successfully bootstrapped. - */ -public class BootstrapClusterResponse extends ActionResponse { - private final boolean alreadyBootstrapped; - - public BootstrapClusterResponse(boolean alreadyBootstrapped) { - this.alreadyBootstrapped = alreadyBootstrapped; - } - - public BootstrapClusterResponse(StreamInput in) throws IOException { - super(in); - alreadyBootstrapped = in.readBoolean(); - } - - /** - * @return whether this node already knew that the cluster had been bootstrapped when handling this request. 
- */ - public boolean getAlreadyBootstrapped() { - return alreadyBootstrapped; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeBoolean(alreadyBootstrapped); - } - - @Override - public String toString() { - return "BootstrapClusterResponse{" + - "alreadyBootstrapped=" + alreadyBootstrapped + - '}'; - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapConfiguration.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapConfiguration.java deleted file mode 100644 index 822287af77465..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapConfiguration.java +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.cluster.coordination.CoordinationMetaData.VotingConfiguration; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Writeable; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; -import java.util.Objects; -import java.util.Set; -import java.util.stream.Collectors; - -public class BootstrapConfiguration implements Writeable { - - private final List nodeDescriptions; - - public BootstrapConfiguration(List nodeDescriptions) { - if (nodeDescriptions.isEmpty()) { - throw new IllegalArgumentException("cannot create empty bootstrap configuration"); - } - this.nodeDescriptions = Collections.unmodifiableList(new ArrayList<>(nodeDescriptions)); - } - - public BootstrapConfiguration(StreamInput in) throws IOException { - nodeDescriptions = Collections.unmodifiableList(in.readList(NodeDescription::new)); - assert nodeDescriptions.isEmpty() == false; - } - - public List getNodeDescriptions() { - return nodeDescriptions; - } - - public VotingConfiguration resolve(Iterable discoveredNodes) { - final Set selectedNodes = new HashSet<>(); - for (final NodeDescription nodeDescription : nodeDescriptions) { - final DiscoveryNode discoveredNode = nodeDescription.resolve(discoveredNodes); - if (selectedNodes.add(discoveredNode) == false) { - throw new ElasticsearchException("multiple nodes matching {} in {}", discoveredNode, this); - } - } - - final Set nodeIds = 
selectedNodes.stream().map(DiscoveryNode::getId).collect(Collectors.toSet()); - assert nodeIds.size() == selectedNodes.size() : selectedNodes + " does not contain distinct IDs"; - return new VotingConfiguration(nodeIds); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeList(nodeDescriptions); - } - - @Override - public String toString() { - return "BootstrapConfiguration{" + - "nodeDescriptions=" + nodeDescriptions + - '}'; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - BootstrapConfiguration that = (BootstrapConfiguration) o; - return Objects.equals(nodeDescriptions, that.nodeDescriptions); - } - - @Override - public int hashCode() { - return Objects.hash(nodeDescriptions); - } - - public static class NodeDescription implements Writeable { - - @Nullable - private final String id; - - private final String name; - - @Nullable - public String getId() { - return id; - } - - public String getName() { - return name; - } - - public NodeDescription(@Nullable String id, String name) { - this.id = id; - this.name = Objects.requireNonNull(name); - } - - public NodeDescription(DiscoveryNode discoveryNode) { - this(discoveryNode.getId(), discoveryNode.getName()); - } - - public NodeDescription(StreamInput in) throws IOException { - this(in.readOptionalString(), in.readString()); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - out.writeOptionalString(id); - out.writeString(name); - } - - @Override - public String toString() { - return "NodeDescription{" + - "id='" + id + '\'' + - ", name='" + name + '\'' + - '}'; - } - - public DiscoveryNode resolve(Iterable discoveredNodes) { - DiscoveryNode selectedNode = null; - for (final DiscoveryNode discoveredNode : discoveredNodes) { - assert discoveredNode.isMasterNode() : discoveredNode; - if (discoveredNode.getName().equals(name)) { - if (id == null || id.equals(discoveredNode.getId())) { - if (selectedNode != null) { - throw new ElasticsearchException( - "discovered multiple nodes matching {} in {}", this, discoveredNodes); - } - selectedNode = discoveredNode; - } else { - throw new ElasticsearchException("node id mismatch comparing {} to {}", this, discoveredNode); - } - } else if (id != null && id.equals(discoveredNode.getId())) { - throw new ElasticsearchException("node name mismatch comparing {} to {}", this, discoveredNode); - } - } - if (selectedNode == null) { - throw new ElasticsearchException("no node matching {} found in {}", this, discoveredNodes); - } - - return selectedNode; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - NodeDescription that = (NodeDescription) o; - return Objects.equals(id, that.id) && - Objects.equals(name, that.name); - } - - @Override - public int hashCode() { - return Objects.hash(id, name); - } - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesAction.java deleted file mode 100644 index acaef284a5420..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesAction.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.action.Action; -import org.elasticsearch.common.io.stream.Writeable.Reader; - -public class GetDiscoveredNodesAction extends Action { - public static final GetDiscoveredNodesAction INSTANCE = new GetDiscoveredNodesAction(); - public static final String NAME = "cluster:admin/bootstrap/discover_nodes"; - - private GetDiscoveredNodesAction() { - super(NAME); - } - - @Override - public GetDiscoveredNodesResponse newResponse() { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - - @Override - public Reader getResponseReader() { - return GetDiscoveredNodesResponse::new; - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesRequest.java deleted file mode 100644 index f91a4de5263be..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesRequest.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.unit.TimeValue; - -import java.io.IOException; -import java.util.Collections; -import java.util.List; - -/** - * Request the set of master-eligible nodes discovered by this node. Most useful in a brand-new cluster as a precursor to setting the - * initial configuration using {@link BootstrapClusterRequest}. 
- */ -public class GetDiscoveredNodesRequest extends ActionRequest { - - @Nullable // if the request should wait indefinitely - private TimeValue timeout = TimeValue.timeValueSeconds(30); - - private List requiredNodes = Collections.emptyList(); - - public GetDiscoveredNodesRequest() { - } - - public GetDiscoveredNodesRequest(StreamInput in) throws IOException { - super(in); - timeout = in.readOptionalTimeValue(); - requiredNodes = in.readList(StreamInput::readString); - } - - /** - * Sometimes it is useful to wait until enough nodes have been discovered, rather than failing immediately. This parameter controls how - * long to wait, and defaults to 30s. - * - * @param timeout how long to wait to discover sufficiently many nodes to respond successfully. - */ - public void setTimeout(@Nullable TimeValue timeout) { - if (timeout != null && timeout.compareTo(TimeValue.ZERO) < 0) { - throw new IllegalArgumentException("negative timeout of [" + timeout + "] is not allowed"); - } - this.timeout = timeout; - } - - /** - * Sometimes it is useful to wait until enough nodes have been discovered, rather than failing immediately. This parameter controls how - * long to wait, and defaults to 30s. - * - * @return how long to wait to discover sufficiently many nodes to respond successfully. - */ - @Nullable - public TimeValue getTimeout() { - return timeout; - } - - /** - * Sometimes it is useful only to receive a successful response after discovering a certain set of master-eligible nodes. - * This parameter gives the names or transport addresses of the expected nodes. - * - * @return list of expected nodes - */ - public List getRequiredNodes() { - return requiredNodes; - } - - /** - * Sometimes it is useful only to receive a successful response after discovering a certain set of master-eligible nodes. - * This parameter gives the names or transport addresses of the expected nodes. - * - * @param requiredNodes list of expected nodes - */ - public void setRequiredNodes(final List requiredNodes) { - this.requiredNodes = requiredNodes; - } - - @Override - public ActionRequestValidationException validate() { - return null; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeOptionalTimeValue(timeout); - out.writeStringList(requiredNodes); - } - - @Override - public String toString() { - return "GetDiscoveredNodesRequest{" + - "timeout=" + timeout + - ", requiredNodes=" + requiredNodes + "}"; - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesResponse.java deleted file mode 100644 index f697e16c03c2c..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesResponse.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapConfiguration.NodeDescription; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; -import java.util.Collections; -import java.util.HashSet; -import java.util.Set; -import java.util.stream.Collectors; - -/** - * Response to {@link GetDiscoveredNodesRequest}, containing the set of master-eligible nodes that were discovered. - */ -public class GetDiscoveredNodesResponse extends ActionResponse { - private final Set nodes; - - public GetDiscoveredNodesResponse(Set nodes) { - this.nodes = Collections.unmodifiableSet(new HashSet<>(nodes)); - } - - public GetDiscoveredNodesResponse(StreamInput in) throws IOException { - super(in); - nodes = Collections.unmodifiableSet(in.readSet(DiscoveryNode::new)); - } - - /** - * @return the set of nodes that were discovered. - */ - public Set getNodes() { - return nodes; - } - - /** - * @return a bootstrap configuration constructed from the set of nodes that were discovered, in order to make a - * {@link BootstrapClusterRequest}. - */ - public BootstrapConfiguration getBootstrapConfiguration() { - return new BootstrapConfiguration(nodes.stream().map(NodeDescription::new).collect(Collectors.toList())); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("usage of Streamable is to be replaced by Writeable"); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeCollection(nodes); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportBootstrapClusterAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportBootstrapClusterAction.java deleted file mode 100644 index 32a9f39cc0db8..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportBootstrapClusterAction.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.cluster.coordination.Coordinator; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.transport.TransportService; - -import static org.elasticsearch.discovery.DiscoveryModule.DISCOVERY_TYPE_SETTING; - -public class TransportBootstrapClusterAction extends HandledTransportAction { - - @Nullable // TODO make this not nullable - private final Coordinator coordinator; - private final TransportService transportService; - private final String discoveryType; - - @Inject - public TransportBootstrapClusterAction(Settings settings, ActionFilters actionFilters, TransportService transportService, - Discovery discovery) { - super(BootstrapClusterAction.NAME, transportService, actionFilters, BootstrapClusterRequest::new); - this.transportService = transportService; - this.discoveryType = DISCOVERY_TYPE_SETTING.get(settings); - if (discovery instanceof Coordinator) { - coordinator = (Coordinator) discovery; - } else { - coordinator = null; - } - } - - @Override - protected void doExecute(Task task, BootstrapClusterRequest request, ActionListener listener) { - if (coordinator == null) { // TODO remove when not nullable - throw new IllegalArgumentException("cluster bootstrapping is not supported by discovery type [" + discoveryType + "]"); - } - - final DiscoveryNode localNode = transportService.getLocalNode(); - assert localNode != null; - if (localNode.isMasterNode() == false) { - throw new IllegalArgumentException( - "this node is not master-eligible, but cluster bootstrapping can only happen on a master-eligible node"); - } - - transportService.getThreadPool().generic().execute(new AbstractRunnable() { - @Override - public void doRun() { - listener.onResponse(new BootstrapClusterResponse( - coordinator.setInitialConfiguration(request.getBootstrapConfiguration()) == false)); - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - - @Override - public String toString() { - return "setting initial configuration with " + request; - } - }); - } -} diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportGetDiscoveredNodesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportGetDiscoveredNodesAction.java deleted file mode 100644 index 6f6336c3bd5f3..0000000000000 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportGetDiscoveredNodesAction.java +++ /dev/null @@ -1,178 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.ElasticsearchTimeoutException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.cluster.coordination.ClusterAlreadyBootstrappedException; -import org.elasticsearch.cluster.coordination.Coordinator; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.Writeable.Reader; -import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.util.concurrent.ListenableFuture; -import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.tasks.Task; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.threadpool.ThreadPool.Names; -import org.elasticsearch.transport.TransportService; - -import java.util.HashSet; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Set; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.stream.Collectors; - -import static org.elasticsearch.discovery.DiscoveryModule.DISCOVERY_TYPE_SETTING; - -public class TransportGetDiscoveredNodesAction extends HandledTransportAction { - - @Nullable // TODO make this not nullable - private final Coordinator coordinator; - private final TransportService transportService; - private final String discoveryType; - - @Inject - public TransportGetDiscoveredNodesAction(Settings settings, ActionFilters actionFilters, TransportService transportService, - Discovery discovery) { - super(GetDiscoveredNodesAction.NAME, transportService, actionFilters, - (Reader) GetDiscoveredNodesRequest::new); - - this.discoveryType = DISCOVERY_TYPE_SETTING.get(settings); - this.transportService = transportService; - if (discovery instanceof Coordinator) { - coordinator = (Coordinator) discovery; - } else { - coordinator = null; - } - } - - @Override - protected void doExecute(Task task, GetDiscoveredNodesRequest request, ActionListener listener) { - if (coordinator == null) { // TODO remove when not nullable - throw new IllegalArgumentException("discovered nodes are not exposed by discovery type [" + discoveryType + "]"); - } - - final DiscoveryNode localNode = transportService.getLocalNode(); - assert localNode != null; - if (localNode.isMasterNode() == false) { - throw new IllegalArgumentException( - "this node is not master-eligible, but discovered nodes are only exposed by master-eligible nodes"); - } - final ExecutorService directExecutor = EsExecutors.newDirectExecutorService(); - final AtomicBoolean listenerNotified = new AtomicBoolean(); - final ListenableFuture listenableFuture = new ListenableFuture<>(); - final ThreadPool threadPool = transportService.getThreadPool(); - listenableFuture.addListener(listener, 
directExecutor, threadPool.getThreadContext()); - // TODO make it so that listenableFuture copes with multiple completions, and then remove listenerNotified - - final ActionListener> respondIfRequestSatisfied = new ActionListener>() { - @Override - public void onResponse(Iterable nodes) { - final Set nodesSet = new LinkedHashSet<>(); - nodesSet.add(localNode); - nodes.forEach(nodesSet::add); - logger.trace("discovered {}", nodesSet); - try { - if (checkWaitRequirements(request, nodesSet)) { - final GetDiscoveredNodesResponse response = new GetDiscoveredNodesResponse(nodesSet); - if (listenerNotified.compareAndSet(false, true)) { - listenableFuture.onResponse(response); - } - } - } catch (Exception e) { - onFailure(e); - } - } - - @Override - public void onFailure(Exception e) { - if (listenerNotified.compareAndSet(false, true)) { - listenableFuture.onFailure(e); - } - } - - @Override - public String toString() { - return "waiting for " + request; - } - }; - - final Releasable releasable = coordinator.withDiscoveryListener(respondIfRequestSatisfied); - listenableFuture.addListener(ActionListener.wrap(releasable::close), directExecutor, threadPool.getThreadContext()); - - if (coordinator.isInitialConfigurationSet()) { - respondIfRequestSatisfied.onFailure(new ClusterAlreadyBootstrappedException()); - } else { - respondIfRequestSatisfied.onResponse(coordinator.getFoundPeers()); - } - - if (request.getTimeout() != null) { - threadPool.schedule(request.getTimeout(), Names.SAME, new Runnable() { - @Override - public void run() { - respondIfRequestSatisfied.onFailure(new ElasticsearchTimeoutException("timed out while waiting for " + request)); - } - - @Override - public String toString() { - return "timeout handler for " + request; - } - }); - } - } - - private static boolean matchesRequirement(DiscoveryNode discoveryNode, String requirement) { - return discoveryNode.getName().equals(requirement) - || discoveryNode.getAddress().toString().equals(requirement) - || discoveryNode.getAddress().getAddress().equals(requirement); - } - - private static boolean checkWaitRequirements(GetDiscoveredNodesRequest request, Set nodes) { - List requirements = request.getRequiredNodes(); - final Set selectedNodes = new HashSet<>(); - for (final String requirement : requirements) { - final Set matchingNodes - = nodes.stream().filter(n -> matchesRequirement(n, requirement)).collect(Collectors.toSet()); - - if (matchingNodes.isEmpty()) { - return false; - } - if (matchingNodes.size() > 1) { - throw new IllegalArgumentException("[" + requirement + "] matches " + matchingNodes); - } - - for (final DiscoveryNode matchingNode : matchingNodes) { - if (selectedNodes.add(matchingNode) == false) { - throw new IllegalArgumentException("[" + matchingNode + "] matches " + - requirements.stream().filter(r -> matchesRequirement(matchingNode, requirement)).collect(Collectors.toList())); - } - } - } - - return true; - } -} diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterAlreadyBootstrappedException.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterAlreadyBootstrappedException.java deleted file mode 100644 index cc1c77c88477c..0000000000000 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterAlreadyBootstrappedException.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.cluster.coordination; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.io.stream.StreamInput; - -import java.io.IOException; - -/** - * Exception thrown if trying to discover nodes in order to perform cluster bootstrapping, but a cluster is formed before all the required - * nodes are discovered. - */ -public class ClusterAlreadyBootstrappedException extends ElasticsearchException { - public ClusterAlreadyBootstrappedException() { - super("node has already joined a bootstrapped cluster, bootstrapping is not required"); - } - - public ClusterAlreadyBootstrappedException(StreamInput in) throws IOException { - super(in); - } -} diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java index fc3f4493104fc..d21c54c03e4e5 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java @@ -21,56 +21,73 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapClusterAction; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapClusterRequest; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapClusterResponse; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapConfiguration; -import org.elasticsearch.action.admin.cluster.bootstrap.GetDiscoveredNodesAction; -import org.elasticsearch.action.admin.cluster.bootstrap.GetDiscoveredNodesRequest; -import org.elasticsearch.action.admin.cluster.bootstrap.GetDiscoveredNodesResponse; +import org.elasticsearch.cluster.coordination.CoordinationMetaData.VotingConfiguration; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.threadpool.ThreadPool.Names; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; -import java.io.IOException; -import java.util.Collections; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.LinkedHashSet; import java.util.List; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.BooleanSupplier; +import
java.util.function.Consumer; import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; import java.util.stream.Stream; +import java.util.stream.StreamSupport; +import static java.util.Collections.emptyList; +import static java.util.Collections.unmodifiableSet; import static org.elasticsearch.discovery.DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING; import static org.elasticsearch.discovery.zen.SettingsBasedHostsProvider.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING; public class ClusterBootstrapService { - private static final Logger logger = LogManager.getLogger(ClusterBootstrapService.class); - public static final Setting<List<String>> INITIAL_MASTER_NODES_SETTING = - Setting.listSetting("cluster.initial_master_nodes", Collections.emptyList(), Function.identity(), Property.NodeScope); + Setting.listSetting("cluster.initial_master_nodes", emptyList(), Function.identity(), Property.NodeScope); public static final Setting<TimeValue> UNCONFIGURED_BOOTSTRAP_TIMEOUT_SETTING = Setting.timeSetting("discovery.unconfigured_bootstrap_timeout", TimeValue.timeValueSeconds(3), TimeValue.timeValueMillis(1), Property.NodeScope); - private final List<String> initialMasterNodes; - @Nullable + static final String BOOTSTRAP_PLACEHOLDER_PREFIX = "{bootstrap-placeholder}-"; + + private static final Logger logger = LogManager.getLogger(ClusterBootstrapService.class); + private final Set<String> bootstrapRequirements; + @Nullable // null if discoveryIsConfigured() private final TimeValue unconfiguredBootstrapTimeout; private final TransportService transportService; - private volatile boolean running; + private final Supplier<Iterable<DiscoveryNode>> discoveredNodesSupplier; + private final BooleanSupplier isBootstrappedSupplier; + private final Consumer<VotingConfiguration> votingConfigurationConsumer; + private final AtomicBoolean bootstrappingPermitted = new AtomicBoolean(true); + + public ClusterBootstrapService(Settings settings, TransportService transportService, + Supplier<Iterable<DiscoveryNode>> discoveredNodesSupplier, BooleanSupplier isBootstrappedSupplier, + Consumer<VotingConfiguration> votingConfigurationConsumer) { + + final List<String> initialMasterNodes = INITIAL_MASTER_NODES_SETTING.get(settings); + bootstrapRequirements = unmodifiableSet(new LinkedHashSet<>(initialMasterNodes)); + if (bootstrapRequirements.size() != initialMasterNodes.size()) { + throw new IllegalArgumentException( + "setting [" + INITIAL_MASTER_NODES_SETTING.getKey() + "] contains duplicates: " + initialMasterNodes); + } - public ClusterBootstrapService(Settings settings, TransportService transportService) { - initialMasterNodes = INITIAL_MASTER_NODES_SETTING.get(settings); unconfiguredBootstrapTimeout = discoveryIsConfigured(settings) ?
null : UNCONFIGURED_BOOTSTRAP_TIMEOUT_SETTING.get(settings); this.transportService = transportService; + this.discoveredNodesSupplier = discoveredNodesSupplier; + this.isBootstrappedSupplier = isBootstrappedSupplier; + this.votingConfigurationConsumer = votingConfigurationConsumer; } public static boolean discoveryIsConfigured(Settings settings) { @@ -78,157 +95,135 @@ public static boolean discoveryIsConfigured(Settings settings) { .anyMatch(s -> s.exists(settings)); } - public void start() { - assert running == false; - running = true; + void onFoundPeersUpdated() { + final Set nodes = getDiscoveredNodes(); + if (transportService.getLocalNode().isMasterNode() && bootstrapRequirements.isEmpty() == false + && isBootstrappedSupplier.getAsBoolean() == false && nodes.stream().noneMatch(Coordinator::isZen1Node)) { + + final Tuple,List> requirementMatchingResult; + try { + requirementMatchingResult = checkRequirements(nodes); + } catch (IllegalStateException e) { + logger.warn("bootstrapping cancelled", e); + bootstrappingPermitted.set(false); + return; + } + + final Set nodesMatchingRequirements = requirementMatchingResult.v1(); + final List unsatisfiedRequirements = requirementMatchingResult.v2(); + logger.trace("nodesMatchingRequirements={}, unsatisfiedRequirements={}, bootstrapRequirements={}", + nodesMatchingRequirements, unsatisfiedRequirements, bootstrapRequirements); + + if (nodesMatchingRequirements.size() * 2 > bootstrapRequirements.size()) { + startBootstrap(nodesMatchingRequirements, unsatisfiedRequirements); + } + } + } + + void scheduleUnconfiguredBootstrap() { + if (unconfiguredBootstrapTimeout == null) { + return; + } if (transportService.getLocalNode().isMasterNode() == false) { return; } - if (unconfiguredBootstrapTimeout != null) { - logger.info("no discovery configuration found, will perform best-effort cluster bootstrapping after [{}] " + - "unless existing master is discovered", unconfiguredBootstrapTimeout); - final ThreadContext threadContext = transportService.getThreadPool().getThreadContext(); - try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - threadContext.markAsSystemContext(); + logger.info("no discovery configuration found, will perform best-effort cluster bootstrapping after [{}] " + + "unless existing master is discovered", unconfiguredBootstrapTimeout); + + transportService.getThreadPool().scheduleUnlessShuttingDown(unconfiguredBootstrapTimeout, Names.GENERIC, new Runnable() { + @Override + public void run() { + final Set discoveredNodes = getDiscoveredNodes(); + final List zen1Nodes = discoveredNodes.stream().filter(Coordinator::isZen1Node).collect(Collectors.toList()); + if (zen1Nodes.isEmpty()) { + logger.debug("performing best-effort cluster bootstrapping with {}", discoveredNodes); + startBootstrap(discoveredNodes, emptyList()); + } else { + logger.info("avoiding best-effort cluster bootstrapping due to discovery of pre-7.0 nodes {}", zen1Nodes); + } + } + + @Override + public String toString() { + return "unconfigured-discovery delayed bootstrap"; + } + }); + } + + private Set getDiscoveredNodes() { + return Stream.concat(Stream.of(transportService.getLocalNode()), + StreamSupport.stream(discoveredNodesSupplier.get().spliterator(), false)).collect(Collectors.toSet()); + } + + private void startBootstrap(Set discoveryNodes, List unsatisfiedRequirements) { + assert discoveryNodes.stream().allMatch(DiscoveryNode::isMasterNode) : discoveryNodes; + assert discoveryNodes.stream().noneMatch(Coordinator::isZen1Node) : discoveryNodes; + 
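// Unsatisfied requirements are not fatal at this point: each one becomes a
// "{bootstrap-placeholder}-" entry of the voting configuration built below, so the
// configuration still accounts for every requirement even though a placeholder can never
// actually vote. The assertion that follows guarantees the discovered (real) nodes remain
// a strict majority of the resulting configuration, keeping the initial quorum reachable.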
assert unsatisfiedRequirements.size() < discoveryNodes.size() : discoveryNodes + " smaller than " + unsatisfiedRequirements; + if (bootstrappingPermitted.compareAndSet(true, false)) { + doBootstrap(new VotingConfiguration(Stream.concat(discoveryNodes.stream().map(DiscoveryNode::getId), + unsatisfiedRequirements.stream().map(s -> BOOTSTRAP_PLACEHOLDER_PREFIX + s)) + .collect(Collectors.toSet()))); + } + } + + public static boolean isBootstrapPlaceholder(String nodeId) { + return nodeId.startsWith(BOOTSTRAP_PLACEHOLDER_PREFIX); + } + + private void doBootstrap(VotingConfiguration votingConfiguration) { + assert transportService.getLocalNode().isMasterNode(); - transportService.getThreadPool().scheduleUnlessShuttingDown(unconfiguredBootstrapTimeout, Names.SAME, new Runnable() { + try { + votingConfigurationConsumer.accept(votingConfiguration); + } catch (Exception e) { + logger.warn(new ParameterizedMessage("exception when bootstrapping with {}, rescheduling", votingConfiguration), e); + transportService.getThreadPool().scheduleUnlessShuttingDown(TimeValue.timeValueSeconds(10), Names.GENERIC, + new Runnable() { @Override public void run() { - // TODO: remove the following line once schedule method properly preserves thread context - threadContext.markAsSystemContext(); - final GetDiscoveredNodesRequest request = new GetDiscoveredNodesRequest(); - logger.trace("sending {}", request); - transportService.sendRequest(transportService.getLocalNode(), GetDiscoveredNodesAction.NAME, request, - new TransportResponseHandler() { - @Override - public void handleResponse(GetDiscoveredNodesResponse response) { - logger.debug("discovered {}, starting to bootstrap", response.getNodes()); - awaitBootstrap(response.getBootstrapConfiguration()); - } - - @Override - public void handleException(TransportException exp) { - final Throwable rootCause = exp.getRootCause(); - if (rootCause instanceof ClusterAlreadyBootstrappedException) { - logger.debug(rootCause.getMessage(), rootCause); - } else { - logger.warn("discovery attempt failed", exp); - } - } - - @Override - public String executor() { - return Names.SAME; - } - - @Override - public GetDiscoveredNodesResponse read(StreamInput in) throws IOException { - return new GetDiscoveredNodesResponse(in); - } - }); + doBootstrap(votingConfiguration); } @Override public String toString() { - return "unconfigured-discovery delayed bootstrap"; + return "retry of failed bootstrapping with " + votingConfiguration; } - }); - - } - } else if (initialMasterNodes.isEmpty() == false) { - logger.debug("waiting for discovery of master-eligible nodes matching {}", initialMasterNodes); - - final ThreadContext threadContext = transportService.getThreadPool().getThreadContext(); - try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - threadContext.markAsSystemContext(); - - final GetDiscoveredNodesRequest request = new GetDiscoveredNodesRequest(); - request.setRequiredNodes(initialMasterNodes); - request.setTimeout(null); - logger.trace("sending {}", request); - transportService.sendRequest(transportService.getLocalNode(), GetDiscoveredNodesAction.NAME, request, - new TransportResponseHandler() { - @Override - public void handleResponse(GetDiscoveredNodesResponse response) { - assert response.getNodes().stream().allMatch(DiscoveryNode::isMasterNode); - logger.debug("discovered {}, starting to bootstrap", response.getNodes()); - awaitBootstrap(response.getBootstrapConfiguration()); - } - - @Override - public void handleException(TransportException exp) { - 
logger.warn("discovery attempt failed", exp); - } - - @Override - public String executor() { - return Names.SAME; - } - - @Override - public GetDiscoveredNodesResponse read(StreamInput in) throws IOException { - return new GetDiscoveredNodesResponse(in); - } - }); - } + } + ); } } - public void stop() { - running = false; + private static boolean matchesRequirement(DiscoveryNode discoveryNode, String requirement) { + return discoveryNode.getName().equals(requirement) + || discoveryNode.getAddress().toString().equals(requirement) + || discoveryNode.getAddress().getAddress().equals(requirement); } - private void awaitBootstrap(final BootstrapConfiguration bootstrapConfiguration) { - if (running == false) { - logger.debug("awaitBootstrap: not running"); - return; - } + private Tuple,List> checkRequirements(Set nodes) { + final Set selectedNodes = new HashSet<>(); + final List unmatchedRequirements = new ArrayList<>(); + for (final String bootstrapRequirement : bootstrapRequirements) { + final Set matchingNodes + = nodes.stream().filter(n -> matchesRequirement(n, bootstrapRequirement)).collect(Collectors.toSet()); - BootstrapClusterRequest request = new BootstrapClusterRequest(bootstrapConfiguration); - logger.trace("sending {}", request); - transportService.sendRequest(transportService.getLocalNode(), BootstrapClusterAction.NAME, request, - new TransportResponseHandler() { - @Override - public void handleResponse(BootstrapClusterResponse response) { - logger.debug("automatic cluster bootstrapping successful: received {}", response); - } + if (matchingNodes.size() == 0) { + unmatchedRequirements.add(bootstrapRequirement); + } - @Override - public void handleException(TransportException exp) { - // log a warning since a failure here indicates a bad problem, such as: - // - bootstrap configuration resolution failed (e.g. discovered nodes no longer match those in the bootstrap config) - // - discovered nodes no longer form a quorum in the bootstrap config - logger.warn(new ParameterizedMessage("automatic cluster bootstrapping failed, retrying [{}]", - bootstrapConfiguration.getNodeDescriptions()), exp); - - // There's not really much else we can do apart from retry and hope that the problem goes away. The retry is delayed - // since a tight loop here is unlikely to help. 
- transportService.getThreadPool().scheduleUnlessShuttingDown(TimeValue.timeValueSeconds(10), Names.SAME, new Runnable() { - @Override - public void run() { - // TODO: remove the following line once schedule method properly preserves thread context - transportService.getThreadPool().getThreadContext().markAsSystemContext(); - awaitBootstrap(bootstrapConfiguration); - } - - @Override - public String toString() { - return "retry bootstrapping with " + bootstrapConfiguration.getNodeDescriptions(); - } - }); - } + if (matchingNodes.size() > 1) { + throw new IllegalStateException("requirement [" + bootstrapRequirement + "] matches multiple nodes: " + matchingNodes); + } - @Override - public String executor() { - return Names.SAME; + for (final DiscoveryNode matchingNode : matchingNodes) { + if (selectedNodes.add(matchingNode) == false) { + throw new IllegalStateException("node [" + matchingNode + "] matches multiple requirements: " + + bootstrapRequirements.stream().filter(r -> matchesRequirement(matchingNode, r)).collect(Collectors.toList())); } + } + } - @Override - public BootstrapClusterResponse read(StreamInput in) throws IOException { - return new BootstrapClusterResponse(in); - } - }); + return Tuple.tuple(selectedNodes, unmatchedRequirements); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java index 9fc408fc9479c..cc58628b53893 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java @@ -34,6 +34,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool.Names; +import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Set; @@ -188,13 +189,22 @@ String getDescription() { private String describeQuorum(VotingConfiguration votingConfiguration) { final Set nodeIds = votingConfiguration.getNodeIds(); assert nodeIds.isEmpty() == false; + final int requiredNodes = nodeIds.size() / 2 + 1; + + final Set realNodeIds = new HashSet<>(nodeIds); + realNodeIds.removeIf(ClusterBootstrapService::isBootstrapPlaceholder); + assert requiredNodes <= realNodeIds.size() : nodeIds; if (nodeIds.size() == 1) { - return "a node with id " + nodeIds; + return "a node with id " + realNodeIds; } else if (nodeIds.size() == 2) { - return "two nodes with ids " + nodeIds; + return "two nodes with ids " + realNodeIds; } else { - return "at least " + (nodeIds.size() / 2 + 1) + " nodes with ids from " + nodeIds; + if (requiredNodes < realNodeIds.size()) { + return "at least " + requiredNodes + " nodes with ids from " + realNodeIds; + } else { + return requiredNodes + " nodes with ids " + realNodeIds; + } } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 72fe2e081de74..968a9a5f01f7a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -24,7 +24,6 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapConfiguration; import 
org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -83,7 +82,6 @@ import java.util.stream.Collectors; import java.util.stream.StreamSupport; -import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentSet; import static org.elasticsearch.discovery.DiscoverySettings.NO_MASTER_BLOCK_ID; import static org.elasticsearch.gateway.ClusterStateUpdaters.hideStateIfNotRecovered; import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; @@ -139,8 +137,6 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery private JoinHelper.JoinAccumulator joinAccumulator; private Optional currentPublication = Optional.empty(); - private final Set>> discoveredNodesListeners = newConcurrentSet(); - public Coordinator(String nodeName, Settings settings, ClusterSettings clusterSettings, TransportService transportService, NamedWriteableRegistry namedWriteableRegistry, AllocationService allocationService, MasterService masterService, Supplier persistedStateSupplier, UnicastHostsProvider unicastHostsProvider, @@ -170,7 +166,8 @@ public Coordinator(String nodeName, Settings settings, ClusterSettings clusterSe this.clusterApplier = clusterApplier; masterService.setClusterStateSupplier(this::getStateForMasterService); this.reconfigurator = new Reconfigurator(settings, clusterSettings); - this.clusterBootstrapService = new ClusterBootstrapService(settings, transportService); + this.clusterBootstrapService = new ClusterBootstrapService(settings, transportService, this::getFoundPeers, + this::isInitialConfigurationSet, this::setInitialConfiguration); this.discoveryUpgradeService = new DiscoveryUpgradeService(settings, clusterSettings, transportService, this::isInitialConfigurationSet, joinHelper, peerFinder::getFoundPeers, this::setInitialConfiguration); this.lagDetector = new LagDetector(settings, transportService.getThreadPool(), n -> removeNode(n, "lagging"), @@ -296,12 +293,6 @@ PublishWithJoinResponse handlePublishRequest(PublishRequest publishRequest) { becomeFollower("handlePublishRequest", sourceNode); // also updates preVoteCollector } - if (isInitialConfigurationSet()) { - for (final ActionListener> discoveredNodesListener : discoveredNodesListeners) { - discoveredNodesListener.onFailure(new ClusterAlreadyBootstrappedException()); - } - } - return new PublishWithJoinResponse(publishResponse, joinWithDestination(lastJoin, sourceNode, publishRequest.getAcceptedState().term())); } @@ -598,16 +589,12 @@ public void startInitialJoin() { synchronized (mutex) { becomeCandidate("startInitialJoin"); } - - if (isInitialConfigurationSet() == false) { - clusterBootstrapService.start(); - } + clusterBootstrapService.scheduleUnconfiguredBootstrap(); } @Override protected void doStop() { configuredHostsResolver.stop(); - clusterBootstrapService.stop(); } @Override @@ -706,21 +693,6 @@ public boolean isInitialConfigurationSet() { return getStateForMasterService().getLastAcceptedConfiguration().isEmpty() == false; } - /** - * Sets the initial configuration by resolving the given {@link BootstrapConfiguration} to concrete nodes. This method is safe to call - * more than once, as long as each call's bootstrap configuration resolves to the same set of nodes. - * - * @param bootstrapConfiguration A description of the nodes that should form the initial configuration. 
- * @return whether this call successfully set the initial configuration - if false, the cluster has already been bootstrapped. - */ - public boolean setInitialConfiguration(final BootstrapConfiguration bootstrapConfiguration) { - final List selfAndDiscoveredPeers = new ArrayList<>(); - selfAndDiscoveredPeers.add(getLocalNode()); - getFoundPeers().forEach(selfAndDiscoveredPeers::add); - final VotingConfiguration votingConfiguration = bootstrapConfiguration.resolve(selfAndDiscoveredPeers); - return setInitialConfiguration(votingConfiguration); - } - /** * Sets the initial configuration to the given {@link VotingConfiguration}. This method is safe to call * more than once, as long as the argument to each call is the same. @@ -733,13 +705,10 @@ public boolean setInitialConfiguration(final VotingConfiguration votingConfigura final ClusterState currentState = getStateForMasterService(); if (isInitialConfigurationSet()) { + logger.debug("initial configuration already set, ignoring {}", votingConfiguration); return false; } - if (mode != Mode.CANDIDATE) { - throw new CoordinationStateRejectedException("Cannot set initial configuration in mode " + mode); - } - final List knownNodes = new ArrayList<>(); knownNodes.add(getLocalNode()); peerFinder.getFoundPeers().forEach(knownNodes::add); @@ -899,8 +868,6 @@ private ClusterState clusterStateWithNoMasterBlock(ClusterState clusterState) { public void publish(ClusterChangedEvent clusterChangedEvent, ActionListener publishListener, AckListener ackListener) { try { synchronized (mutex) { - assert Thread.holdsLock(mutex) : "Coordinator mutex not held"; - if (mode != Mode.LEADER) { logger.debug(() -> new ParameterizedMessage("[{}] failed publication as not currently leading", clusterChangedEvent.source())); @@ -1019,9 +986,8 @@ protected void onActiveMasterFound(DiscoveryNode masterNode, long term) { @Override protected void onFoundPeersUpdated() { - final Iterable foundPeers; synchronized (mutex) { - foundPeers = getFoundPeers(); + final Iterable foundPeers = getFoundPeers(); if (mode == Mode.CANDIDATE) { final CoordinationState.VoteCollection expectedVotes = new CoordinationState.VoteCollection(); foundPeers.forEach(expectedVotes::addVote); @@ -1039,9 +1005,7 @@ protected void onFoundPeersUpdated() { } } - for (final ActionListener> discoveredNodesListener : discoveredNodesListeners) { - discoveredNodesListener.onResponse(foundPeers); - } + clusterBootstrapService.onFoundPeersUpdated(); } } @@ -1076,14 +1040,6 @@ public String toString() { }); } - public Releasable withDiscoveryListener(ActionListener> listener) { - discoveredNodesListeners.add(listener); - return () -> { - boolean removed = discoveredNodesListeners.remove(listener); - assert removed : listener; - }; - } - public Iterable getFoundPeers() { // TODO everyone takes this and adds the local node. Maybe just add the local node here? 
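// For example, ClusterBootstrapService.getDiscoveredNodes() (above) prepends the local
// node to this iterable itself before matching the bootstrap requirements against it.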
return peerFinder.getFoundPeers(); diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index 489a98dcbaac6..b4ce907d6f7b3 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -32,7 +32,6 @@ import org.elasticsearch.client.AbstractClientHeadersTestCase; import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.coordination.ClusterAlreadyBootstrappedException; import org.elasticsearch.cluster.coordination.CoordinationStateRejectedException; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.IllegalShardRoutingStateException; @@ -809,8 +808,7 @@ public void testIds() { ids.put(148, UnknownNamedObjectException.class); ids.put(149, MultiBucketConsumerService.TooManyBucketsException.class); ids.put(150, CoordinationStateRejectedException.class); - ids.put(151, ClusterAlreadyBootstrappedException.class); - ids.put(152, SnapshotInProgressException.class); + ids.put(151, SnapshotInProgressException.class); Map, Integer> reverse = new HashMap<>(); for (Map.Entry> entry : ids.entrySet()) { diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterRequestTests.java deleted file mode 100644 index ee9c58413b350..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterRequestTests.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapConfiguration.NodeDescription; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; -import java.util.Collections; - -import static org.hamcrest.Matchers.equalTo; - -public class BootstrapClusterRequestTests extends ESTestCase { - - public void testSerialization() throws IOException { - final BootstrapConfiguration bootstrapConfiguration - = new BootstrapConfiguration(Collections.singletonList(new NodeDescription(null, randomAlphaOfLength(10)))); - final BootstrapClusterRequest original = new BootstrapClusterRequest(bootstrapConfiguration); - assertNull(original.validate()); - final BootstrapClusterRequest deserialized = copyWriteable(original, writableRegistry(), BootstrapClusterRequest::new); - assertThat(deserialized.getBootstrapConfiguration(), equalTo(bootstrapConfiguration)); - } -} diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterResponseTests.java deleted file mode 100644 index fb33dbc5fcbd6..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapClusterResponseTests.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; - -import static org.hamcrest.Matchers.equalTo; - -public class BootstrapClusterResponseTests extends ESTestCase { - public void testSerialization() throws IOException { - final BootstrapClusterResponse original = new BootstrapClusterResponse(randomBoolean()); - final BootstrapClusterResponse deserialized = copyWriteable(original, writableRegistry(), BootstrapClusterResponse::new); - assertThat(deserialized.getAlreadyBootstrapped(), equalTo(original.getAlreadyBootstrapped())); - } -} diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapConfigurationTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapConfigurationTests.java deleted file mode 100644 index fc2e24017e76a..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/BootstrapConfigurationTests.java +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapConfiguration.NodeDescription; -import org.elasticsearch.cluster.coordination.CoordinationMetaData.VotingConfiguration; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNode.Role; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.EqualsHashCodeTestUtils; -import org.elasticsearch.test.EqualsHashCodeTestUtils.CopyFunction; - -import java.util.ArrayList; -import java.util.List; -import java.util.stream.Collectors; - -import static java.util.Collections.emptyMap; -import static java.util.Collections.singleton; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.startsWith; - -public class BootstrapConfigurationTests extends ESTestCase { - - public void testEqualsHashcodeSerialization() { - // Note: the explicit cast of the CopyFunction is needed for some IDE (specifically Eclipse 4.8.0) to infer the right type - EqualsHashCodeTestUtils.checkEqualsAndHashCode(randomBootstrapConfiguration(), - (CopyFunction) bootstrapConfiguration -> copyWriteable(bootstrapConfiguration, writableRegistry(), - BootstrapConfiguration::new), - this::mutate); - } - - public void testNodeDescriptionResolvedByName() { - final List discoveryNodes = randomDiscoveryNodes(); - final DiscoveryNode expectedNode = randomFrom(discoveryNodes); - assertThat(new NodeDescription(null, expectedNode.getName()).resolve(discoveryNodes), equalTo(expectedNode)); - } - - public void testNodeDescriptionResolvedByIdAndName() { - final List discoveryNodes = randomDiscoveryNodes(); - final DiscoveryNode expectedNode = randomFrom(discoveryNodes); - assertThat(new NodeDescription(expectedNode).resolve(discoveryNodes), equalTo(expectedNode)); - } - - public void testRejectsMismatchedId() { - final List discoveryNodes = randomDiscoveryNodes(); - final DiscoveryNode expectedNode = randomFrom(discoveryNodes); - final ElasticsearchException e = expectThrows(ElasticsearchException.class, - () -> new NodeDescription(randomAlphaOfLength(11), expectedNode.getName()).resolve(discoveryNodes)); - assertThat(e.getMessage(), startsWith("node id mismatch comparing ")); - } - - public void testRejectsMismatchedName() { - final List discoveryNodes = randomDiscoveryNodes(); - final DiscoveryNode expectedNode = randomFrom(discoveryNodes); - final ElasticsearchException e = expectThrows(ElasticsearchException.class, - () -> new NodeDescription(expectedNode.getId(), randomAlphaOfLength(11)).resolve(discoveryNodes)); - assertThat(e.getMessage(), startsWith("node name mismatch comparing ")); - } - - public void testFailsIfNoMatch() { - final List discoveryNodes = randomDiscoveryNodes(); - final ElasticsearchException e = 
expectThrows(ElasticsearchException.class, - () -> randomNodeDescription().resolve(discoveryNodes)); - assertThat(e.getMessage(), startsWith("no node matching ")); - } - - public void testFailsIfDuplicateMatchOnName() { - final List discoveryNodes = randomDiscoveryNodes(); - final DiscoveryNode discoveryNode = randomFrom(discoveryNodes); - discoveryNodes.add(new DiscoveryNode(discoveryNode.getName(), randomAlphaOfLength(10), buildNewFakeTransportAddress(), emptyMap(), - singleton(Role.MASTER), Version.CURRENT)); - final ElasticsearchException e = expectThrows(ElasticsearchException.class, - () -> new NodeDescription(null, discoveryNode.getName()).resolve(discoveryNodes)); - assertThat(e.getMessage(), startsWith("discovered multiple nodes matching ")); - } - - public void testFailsIfDuplicatedNode() { - final List discoveryNodes = randomDiscoveryNodes(); - final DiscoveryNode discoveryNode = randomFrom(discoveryNodes); - discoveryNodes.add(discoveryNode); - final ElasticsearchException e = expectThrows(ElasticsearchException.class, - () -> new NodeDescription(discoveryNode).resolve(discoveryNodes)); - assertThat(e.getMessage(), startsWith("discovered multiple nodes matching ")); - } - - public void testResolvesEntireConfiguration() { - final List discoveryNodes = randomDiscoveryNodes(); - final List selectedNodes = randomSubsetOf(randomIntBetween(1, discoveryNodes.size()), discoveryNodes); - final BootstrapConfiguration bootstrapConfiguration = new BootstrapConfiguration(selectedNodes.stream() - .map(discoveryNode -> randomBoolean() ? new NodeDescription(discoveryNode) : new NodeDescription(null, discoveryNode.getName())) - .collect(Collectors.toList())); - - final VotingConfiguration expectedConfiguration - = new VotingConfiguration(selectedNodes.stream().map(DiscoveryNode::getId).collect(Collectors.toSet())); - final VotingConfiguration votingConfiguration = bootstrapConfiguration.resolve(discoveryNodes); - assertThat(votingConfiguration, equalTo(expectedConfiguration)); - } - - public void testRejectsDuplicatedDescriptions() { - final List discoveryNodes = randomDiscoveryNodes(); - final List selectedNodes = randomSubsetOf(randomIntBetween(1, discoveryNodes.size()), discoveryNodes); - final List selectedNodeDescriptions = selectedNodes.stream() - .map(discoveryNode -> randomBoolean() ? new NodeDescription(discoveryNode) : new NodeDescription(null, discoveryNode.getName())) - .collect(Collectors.toList()); - final NodeDescription toDuplicate = randomFrom(selectedNodeDescriptions); - selectedNodeDescriptions.add(randomBoolean() ? 
toDuplicate : new NodeDescription(null, toDuplicate.getName())); - final BootstrapConfiguration bootstrapConfiguration = new BootstrapConfiguration(selectedNodeDescriptions); - - final ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> bootstrapConfiguration.resolve(discoveryNodes)); - assertThat(e.getMessage(), startsWith("multiple nodes matching ")); - } - - private NodeDescription mutate(NodeDescription original) { - if (randomBoolean()) { - return new NodeDescription(original.getId(), randomAlphaOfLength(21 - original.getName().length())); - } else { - if (original.getId() == null) { - return new NodeDescription(randomAlphaOfLength(10), original.getName()); - } else if (randomBoolean()) { - return new NodeDescription(randomAlphaOfLength(21 - original.getId().length()), original.getName()); - } else { - return new NodeDescription(null, original.getName()); - } - } - } - - protected BootstrapConfiguration mutate(BootstrapConfiguration original) { - final List newDescriptions = new ArrayList<>(original.getNodeDescriptions()); - final int mutateElement = randomIntBetween(0, newDescriptions.size()); - if (mutateElement == newDescriptions.size()) { - newDescriptions.add(randomIntBetween(0, newDescriptions.size()), randomNodeDescription()); - } else { - if (newDescriptions.size() > 1 && randomBoolean()) { - newDescriptions.remove(mutateElement); - } else { - newDescriptions.set(mutateElement, mutate(newDescriptions.get(mutateElement))); - } - } - return new BootstrapConfiguration(newDescriptions); - } - - protected NodeDescription randomNodeDescription() { - return new NodeDescription(randomBoolean() ? null : randomAlphaOfLength(10), randomAlphaOfLength(10)); - } - - protected BootstrapConfiguration randomBootstrapConfiguration() { - final int size = randomIntBetween(1, 5); - final List nodeDescriptions = new ArrayList<>(size); - while (nodeDescriptions.size() <= size) { - nodeDescriptions.add(randomNodeDescription()); - } - return new BootstrapConfiguration(nodeDescriptions); - } - - protected List randomDiscoveryNodes() { - final int size = randomIntBetween(1, 5); - final List nodes = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - nodes.add(new DiscoveryNode(randomAlphaOfLength(10), randomAlphaOfLength(10), buildNewFakeTransportAddress(), emptyMap(), - singleton(Role.MASTER), Version.CURRENT)); - } - return nodes; - } -} diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesRequestTests.java deleted file mode 100644 index 676e2e958cac7..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesRequestTests.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; - -import static org.hamcrest.Matchers.endsWith; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.nullValue; -import static org.hamcrest.Matchers.startsWith; -import static org.hamcrest.core.Is.is; - -public class GetDiscoveredNodesRequestTests extends ESTestCase { - - public void testTimeoutValidation() { - final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest(); - assertThat("default value is 30s", getDiscoveredNodesRequest.getTimeout(), is(TimeValue.timeValueSeconds(30))); - - final TimeValue newTimeout = TimeValue.parseTimeValue(randomTimeValue(), "timeout"); - getDiscoveredNodesRequest.setTimeout(newTimeout); - assertThat("value updated", getDiscoveredNodesRequest.getTimeout(), equalTo(newTimeout)); - - final IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, - () -> getDiscoveredNodesRequest.setTimeout(TimeValue.timeValueNanos(randomLongBetween(-10, -1)))); - assertThat(exception.getMessage(), startsWith("negative timeout of ")); - assertThat(exception.getMessage(), endsWith(" is not allowed")); - - getDiscoveredNodesRequest.setTimeout(null); - assertThat("value updated", getDiscoveredNodesRequest.getTimeout(), nullValue()); - } - - public void testSerialization() throws IOException { - final GetDiscoveredNodesRequest originalRequest = new GetDiscoveredNodesRequest(); - - if (randomBoolean()) { - originalRequest.setTimeout(TimeValue.parseTimeValue(randomTimeValue(), "timeout")); - } else if (randomBoolean()) { - originalRequest.setTimeout(null); - } - - final GetDiscoveredNodesRequest deserialized = copyWriteable(originalRequest, writableRegistry(), GetDiscoveredNodesRequest::new); - - assertThat(deserialized.getTimeout(), equalTo(originalRequest.getTimeout())); - } -} diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesResponseTests.java deleted file mode 100644 index 7d2fc602e66c6..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/GetDiscoveredNodesResponseTests.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.Version; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNode.Role; -import org.elasticsearch.common.UUIDs; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; -import java.util.HashSet; -import java.util.Set; -import java.util.stream.Collectors; - -import static java.util.Collections.emptyMap; -import static java.util.Collections.singleton; -import static org.hamcrest.Matchers.equalTo; - -public class GetDiscoveredNodesResponseTests extends ESTestCase { - public void testSerialization() throws IOException { - final GetDiscoveredNodesResponse original = new GetDiscoveredNodesResponse(randomDiscoveryNodeSet()); - final GetDiscoveredNodesResponse deserialized = copyWriteable(original, writableRegistry(), GetDiscoveredNodesResponse::new); - assertThat(deserialized.getNodes(), equalTo(original.getNodes())); - } - - private Set<DiscoveryNode> randomDiscoveryNodeSet() { - final int size = randomIntBetween(1, 10); - final Set<DiscoveryNode> nodes = new HashSet<>(size); - while (nodes.size() < size) { - assertTrue(nodes.add(new DiscoveryNode(randomAlphaOfLength(10), randomAlphaOfLength(10), - UUIDs.randomBase64UUID(random()), randomAlphaOfLength(10), randomAlphaOfLength(10), buildNewFakeTransportAddress(), - emptyMap(), singleton(Role.MASTER), Version.CURRENT))); - } - return nodes; - } - - public void testConversionToBootstrapConfiguration() { - final Set<DiscoveryNode> nodes = randomDiscoveryNodeSet(); - assertThat(new GetDiscoveredNodesResponse(nodes).getBootstrapConfiguration().resolve(nodes).getNodeIds(), - equalTo(nodes.stream().map(DiscoveryNode::getId).collect(Collectors.toSet()))); - } -} diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportBootstrapClusterActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportBootstrapClusterActionTests.java deleted file mode 100644 index 31486a52bd08f..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportBootstrapClusterActionTests.java +++ /dev/null @@ -1,233 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License.
- */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapConfiguration.NodeDescription; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ESAllocationTestCase; -import org.elasticsearch.cluster.coordination.Coordinator; -import org.elasticsearch.cluster.coordination.InMemoryPersistedState; -import org.elasticsearch.cluster.coordination.NoOpClusterApplier; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.MasterService; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.discovery.DiscoveryModule; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.transport.MockTransport; -import org.elasticsearch.threadpool.TestThreadPool; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.threadpool.ThreadPool.Names; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportResponseHandler; -import org.elasticsearch.transport.TransportService; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; - -import java.io.IOException; -import java.util.Collections; -import java.util.Random; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; - -import static java.util.Collections.emptyList; -import static java.util.Collections.emptyMap; -import static java.util.Collections.emptySet; -import static java.util.Collections.singletonList; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verifyZeroInteractions; - -public class TransportBootstrapClusterActionTests extends ESTestCase { - - private static final ActionFilters EMPTY_FILTERS = new ActionFilters(emptySet()); - - private DiscoveryNode discoveryNode; - private static ThreadPool threadPool; - private TransportService transportService; - private Coordinator coordinator; - - private static BootstrapClusterRequest exampleRequest() { - return new BootstrapClusterRequest(new BootstrapConfiguration(singletonList(new NodeDescription("id", "name")))); - } - - @BeforeClass - public static void createThreadPool() { - threadPool = new TestThreadPool("test", Settings.EMPTY); - } - - @AfterClass - public static void shutdownThreadPool() { - threadPool.shutdown(); - } - - @Before - public void setupTest() { - discoveryNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), Version.CURRENT); - final MockTransport transport = new MockTransport(); - transportService = transport.createTransportService(Settings.EMPTY, threadPool, - TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> discoveryNode, null, emptySet()); - - final ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - coordinator = new Coordinator("local", Settings.EMPTY, clusterSettings, transportService, writableRegistry(), - ESAllocationTestCase.createAllocationService(Settings.EMPTY), - new MasterService("local", Settings.EMPTY, 
threadPool), - () -> new InMemoryPersistedState(0, ClusterState.builder(new ClusterName("cluster")).build()), r -> emptyList(), - new NoOpClusterApplier(), Collections.emptyList(), new Random(random().nextLong())); - } - - public void testHandlesNonstandardDiscoveryImplementation() throws InterruptedException { - final Discovery discovery = mock(Discovery.class); - verifyZeroInteractions(discovery); - - final String nonstandardDiscoveryType = randomFrom(DiscoveryModule.ZEN_DISCOVERY_TYPE, "single-node", "unknown"); - new TransportBootstrapClusterAction( - Settings.builder().put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), nonstandardDiscoveryType).build(), - EMPTY_FILTERS, transportService, discovery); // registers action - transportService.start(); - transportService.acceptIncomingRequests(); - - final CountDownLatch countDownLatch = new CountDownLatch(1); - transportService.sendRequest(discoveryNode, BootstrapClusterAction.NAME, exampleRequest(), new ResponseHandler() { - @Override - public void handleResponse(BootstrapClusterResponse response) { - throw new AssertionError("should not be called"); - } - - @Override - public void handleException(TransportException exp) { - final Throwable rootCause = exp.getRootCause(); - assertThat(rootCause, instanceOf(IllegalArgumentException.class)); - assertThat(rootCause.getMessage(), equalTo("cluster bootstrapping is not supported by discovery type [" + - nonstandardDiscoveryType + "]")); - countDownLatch.countDown(); - } - }); - - assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); - } - - public void testFailsOnNonMasterEligibleNodes() throws InterruptedException { - discoveryNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); - // transport service only picks up local node when started, so we can change it here ^ - - new TransportBootstrapClusterAction(Settings.EMPTY, EMPTY_FILTERS, transportService, coordinator); // registers action - transportService.start(); - transportService.acceptIncomingRequests(); - coordinator.start(); - - final CountDownLatch countDownLatch = new CountDownLatch(1); - transportService.sendRequest(discoveryNode, BootstrapClusterAction.NAME, exampleRequest(), new ResponseHandler() { - @Override - public void handleResponse(BootstrapClusterResponse response) { - throw new AssertionError("should not be called"); - } - - @Override - public void handleException(TransportException exp) { - final Throwable rootCause = exp.getRootCause(); - assertThat(rootCause, instanceOf(IllegalArgumentException.class)); - assertThat(rootCause.getMessage(), - equalTo("this node is not master-eligible, but cluster bootstrapping can only happen on a master-eligible node")); - countDownLatch.countDown(); - } - }); - - assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); - } - - public void testSetsInitialConfiguration() throws InterruptedException { - new TransportBootstrapClusterAction(Settings.EMPTY, EMPTY_FILTERS, transportService, coordinator); // registers action - transportService.start(); - transportService.acceptIncomingRequests(); - coordinator.start(); - coordinator.startInitialJoin(); - - assertFalse(coordinator.isInitialConfigurationSet()); - - final BootstrapClusterRequest request - = new BootstrapClusterRequest(new BootstrapConfiguration(singletonList(new NodeDescription(discoveryNode)))); - - { - final int parallelRequests = 10; - final CountDownLatch countDownLatch = new CountDownLatch(parallelRequests); - final AtomicInteger successes = new AtomicInteger(); - - 
for (int i = 0; i < parallelRequests; i++) { - transportService.sendRequest(discoveryNode, BootstrapClusterAction.NAME, request, new ResponseHandler() { - @Override - public void handleResponse(BootstrapClusterResponse response) { - if (response.getAlreadyBootstrapped() == false) { - successes.incrementAndGet(); - } - countDownLatch.countDown(); - } - - @Override - public void handleException(TransportException exp) { - throw new AssertionError("should not be called", exp); - } - }); - } - - assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); - assertThat(successes.get(), equalTo(1)); - } - - assertTrue(coordinator.isInitialConfigurationSet()); - - { - final CountDownLatch countDownLatch = new CountDownLatch(1); - transportService.sendRequest(discoveryNode, BootstrapClusterAction.NAME, request, new ResponseHandler() { - @Override - public void handleResponse(BootstrapClusterResponse response) { - assertTrue(response.getAlreadyBootstrapped()); - countDownLatch.countDown(); - } - - @Override - public void handleException(TransportException exp) { - throw new AssertionError("should not be called", exp); - } - }); - - assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); - } - } - - private abstract class ResponseHandler implements TransportResponseHandler<BootstrapClusterResponse> { - @Override - public String executor() { - return Names.SAME; - } - - @Override - public BootstrapClusterResponse read(StreamInput in) throws IOException { - return new BootstrapClusterResponse(in); - } - } -} diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportGetDiscoveredNodesActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportGetDiscoveredNodesActionTests.java deleted file mode 100644 index 6d94dcf6eca14..0000000000000 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/bootstrap/TransportGetDiscoveredNodesActionTests.java +++ /dev/null @@ -1,533 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License.
- */ -package org.elasticsearch.action.admin.cluster.bootstrap; - -import org.elasticsearch.ElasticsearchTimeoutException; -import org.elasticsearch.Version; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ESAllocationTestCase; -import org.elasticsearch.cluster.coordination.ClusterAlreadyBootstrappedException; -import org.elasticsearch.cluster.coordination.ClusterBootstrapService; -import org.elasticsearch.cluster.coordination.CoordinationMetaData; -import org.elasticsearch.cluster.coordination.CoordinationMetaData.VotingConfiguration; -import org.elasticsearch.cluster.coordination.Coordinator; -import org.elasticsearch.cluster.coordination.InMemoryPersistedState; -import org.elasticsearch.cluster.coordination.NoOpClusterApplier; -import org.elasticsearch.cluster.coordination.PeersResponse; -import org.elasticsearch.cluster.coordination.PublicationTransportHandler; -import org.elasticsearch.cluster.coordination.PublishWithJoinResponse; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.service.MasterService; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.settings.ClusterSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.discovery.DiscoveryModule; -import org.elasticsearch.discovery.PeersRequest; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.transport.MockTransport; -import org.elasticsearch.threadpool.TestThreadPool; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.threadpool.ThreadPool.Names; -import org.elasticsearch.transport.BytesTransportRequest; -import org.elasticsearch.transport.TransportException; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportResponseHandler; -import org.elasticsearch.transport.TransportService; -import org.elasticsearch.transport.TransportService.HandshakeResponse; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Collections; -import java.util.EnumSet; -import java.util.Random; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; - -import static java.util.Collections.emptyList; -import static java.util.Collections.emptyMap; -import static java.util.Collections.emptySet; -import static java.util.Collections.singleton; -import static java.util.Collections.singletonList; -import static org.elasticsearch.cluster.ClusterName.CLUSTER_NAME_SETTING; -import static org.elasticsearch.discovery.PeerFinder.REQUEST_PEERS_ACTION_NAME; -import static org.elasticsearch.transport.TransportService.HANDSHAKE_ACTION_NAME; -import static org.hamcrest.Matchers.containsInAnyOrder; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.startsWith; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verifyZeroInteractions; - -public class TransportGetDiscoveredNodesActionTests extends ESTestCase { - - private static final ActionFilters EMPTY_FILTERS = new ActionFilters(emptySet()); - - private 
static ThreadPool threadPool; - private DiscoveryNode localNode; - private String clusterName; - private TransportService transportService; - private Coordinator coordinator; - private DiscoveryNode otherNode; - - @BeforeClass - public static void createThreadPool() { - threadPool = new TestThreadPool("test", Settings.EMPTY); - } - - @AfterClass - public static void shutdownThreadPool() { - threadPool.shutdown(); - } - - @Before - public void setupTest() { - clusterName = randomAlphaOfLength(10); - localNode = new DiscoveryNode( - "node1", "local", buildNewFakeTransportAddress(), emptyMap(), EnumSet.allOf(DiscoveryNode.Role.class), Version.CURRENT); - otherNode = new DiscoveryNode( - "node2", "other", buildNewFakeTransportAddress(), emptyMap(), EnumSet.allOf(DiscoveryNode.Role.class), Version.CURRENT); - - final MockTransport transport = new MockTransport() { - @Override - protected void onSendRequest(long requestId, String action, TransportRequest request, DiscoveryNode node) { - if (action.equals(HANDSHAKE_ACTION_NAME) && node.getAddress().equals(otherNode.getAddress())) { - handleResponse(requestId, new HandshakeResponse(otherNode, new ClusterName(clusterName), Version.CURRENT)); - } - } - }; - transportService = transport.createTransportService( - Settings.builder().put(CLUSTER_NAME_SETTING.getKey(), clusterName).build(), threadPool, - TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, emptySet()); - - final Settings settings = Settings.builder() - .putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), - ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.get(Settings.EMPTY)).build(); // suppress auto-bootstrap - - final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS); - coordinator = new Coordinator("local", settings, clusterSettings, transportService, writableRegistry(), - ESAllocationTestCase.createAllocationService(settings), - new MasterService("local", settings, threadPool), - () -> new InMemoryPersistedState(0, ClusterState.builder(new ClusterName(clusterName)).build()), r -> emptyList(), - new NoOpClusterApplier(), Collections.emptyList(), new Random(random().nextLong())); - } - - public void testHandlesNonstandardDiscoveryImplementation() throws InterruptedException { - final Discovery discovery = mock(Discovery.class); - verifyZeroInteractions(discovery); - - final String nonstandardDiscoveryType = randomFrom(DiscoveryModule.ZEN_DISCOVERY_TYPE, "single-node", "unknown"); - new TransportGetDiscoveredNodesAction( - Settings.builder().put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), nonstandardDiscoveryType).build(), - EMPTY_FILTERS, transportService, discovery); // registers action - transportService.start(); - transportService.acceptIncomingRequests(); - - final CountDownLatch countDownLatch = new CountDownLatch(1); - transportService.sendRequest(localNode, GetDiscoveredNodesAction.NAME, new GetDiscoveredNodesRequest(), new ResponseHandler() { - @Override - public void handleResponse(GetDiscoveredNodesResponse response) { - throw new AssertionError("should not be called"); - } - - @Override - public void handleException(TransportException exp) { - final Throwable rootCause = exp.getRootCause(); - assertThat(rootCause, instanceOf(IllegalArgumentException.class)); - assertThat(rootCause.getMessage(), equalTo("discovered nodes are not exposed by discovery type [" + - nonstandardDiscoveryType + "]")); - countDownLatch.countDown(); - } - }); - - 
assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); - } - - public void testFailsOnMasterIneligibleNodes() throws InterruptedException { - localNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); - // transport service only picks up local node when started, so we can change it here ^ - - new TransportGetDiscoveredNodesAction(Settings.EMPTY, EMPTY_FILTERS, transportService, coordinator); // registers action - transportService.start(); - transportService.acceptIncomingRequests(); - coordinator.start(); - - final CountDownLatch countDownLatch = new CountDownLatch(1); - transportService.sendRequest(localNode, GetDiscoveredNodesAction.NAME, new GetDiscoveredNodesRequest(), new ResponseHandler() { - @Override - public void handleResponse(GetDiscoveredNodesResponse response) { - throw new AssertionError("should not be called"); - } - - @Override - public void handleException(TransportException exp) { - final Throwable rootCause = exp.getRootCause(); - assertThat(rootCause, instanceOf(IllegalArgumentException.class)); - assertThat(rootCause.getMessage(), - equalTo("this node is not master-eligible, but discovered nodes are only exposed by master-eligible nodes")); - countDownLatch.countDown(); - } - }); - - assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); - } - - public void testFailsQuicklyWithZeroTimeoutAndAcceptsNullTimeout() throws InterruptedException { - new TransportGetDiscoveredNodesAction(Settings.EMPTY, EMPTY_FILTERS, transportService, coordinator); // registers action - transportService.start(); - transportService.acceptIncomingRequests(); - coordinator.start(); - coordinator.startInitialJoin(); - - { - final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest(); - getDiscoveredNodesRequest.setTimeout(null); - getDiscoveredNodesRequest.setRequiredNodes(singletonList("not-a-node")); - transportService.sendRequest(localNode, GetDiscoveredNodesAction.NAME, getDiscoveredNodesRequest, new ResponseHandler() { - @Override - public void handleResponse(GetDiscoveredNodesResponse response) { - throw new AssertionError("should not be called"); - } - - @Override - public void handleException(TransportException exp) { - throw new AssertionError("should not be called", exp); - } - }); - } - - { - final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest(); - getDiscoveredNodesRequest.setTimeout(TimeValue.ZERO); - getDiscoveredNodesRequest.setRequiredNodes(singletonList("not-a-node")); - - final CountDownLatch countDownLatch = new CountDownLatch(1); - transportService.sendRequest(localNode, GetDiscoveredNodesAction.NAME, getDiscoveredNodesRequest, new ResponseHandler() { - @Override - public void handleResponse(GetDiscoveredNodesResponse response) { - throw new AssertionError("should not be called"); - } - - @Override - public void handleException(TransportException exp) { - final Throwable rootCause = exp.getRootCause(); - assertThat(rootCause, instanceOf(ElasticsearchTimeoutException.class)); - assertThat(rootCause.getMessage(), startsWith("timed out while waiting for GetDiscoveredNodesRequest{")); - countDownLatch.countDown(); - } - }); - - assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); - } - } - - public void testFailsIfAlreadyBootstrapped() throws InterruptedException { - new TransportGetDiscoveredNodesAction(Settings.EMPTY, EMPTY_FILTERS, transportService, coordinator); // registers action - transportService.start(); - 
transportService.acceptIncomingRequests(); - coordinator.start(); - coordinator.startInitialJoin(); - coordinator.setInitialConfiguration(new VotingConfiguration(singleton(localNode.getId()))); - - final CountDownLatch countDownLatch = new CountDownLatch(1); - final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest(); - getDiscoveredNodesRequest.setTimeout(null); - transportService.sendRequest(localNode, GetDiscoveredNodesAction.NAME, getDiscoveredNodesRequest, new ResponseHandler() { - @Override - public void handleResponse(GetDiscoveredNodesResponse response) { - throw new AssertionError("should not be called"); - } - - @Override - public void handleException(TransportException exp) { - if (exp.getRootCause() instanceof ClusterAlreadyBootstrappedException) { - countDownLatch.countDown(); - } else { - throw new AssertionError("should not be called", exp); - } - } - }); - assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); - } - - public void testFailsIfAcceptsClusterStateWithNonemptyConfiguration() throws InterruptedException, IOException { - new TransportGetDiscoveredNodesAction(Settings.EMPTY, EMPTY_FILTERS, transportService, coordinator); // registers action - transportService.start(); - transportService.acceptIncomingRequests(); - coordinator.start(); - coordinator.startInitialJoin(); - - final CountDownLatch countDownLatch = new CountDownLatch(1); - final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest(); - getDiscoveredNodesRequest.setTimeout(null); - getDiscoveredNodesRequest.setRequiredNodes(singletonList("not-a-node")); - transportService.sendRequest(localNode, GetDiscoveredNodesAction.NAME, getDiscoveredNodesRequest, new ResponseHandler() { - @Override - public void handleResponse(GetDiscoveredNodesResponse response) { - throw new AssertionError("should not be called"); - } - - @Override - public void handleException(TransportException exp) { - if (exp.getRootCause() instanceof ClusterAlreadyBootstrappedException) { - countDownLatch.countDown(); - } else { - throw new AssertionError("should not be called", exp); - } - } - }); - - ClusterState.Builder publishedClusterState = ClusterState.builder(ClusterName.DEFAULT); - publishedClusterState.incrementVersion(); - publishedClusterState.nodes(DiscoveryNodes.builder() - .add(localNode).add(otherNode).localNodeId(localNode.getId()).masterNodeId(otherNode.getId())); - publishedClusterState.metaData(MetaData.builder().coordinationMetaData(CoordinationMetaData.builder() - .term(1) - .lastAcceptedConfiguration(new VotingConfiguration(singleton(otherNode.getId()))) - .lastCommittedConfiguration(new VotingConfiguration(singleton(otherNode.getId()))) - .build())); - - transportService.sendRequest(localNode, PublicationTransportHandler.PUBLISH_STATE_ACTION_NAME, - new BytesTransportRequest(PublicationTransportHandler.serializeFullClusterState(publishedClusterState.build(), Version.CURRENT), - Version.CURRENT), - new TransportResponseHandler<PublishWithJoinResponse>() { - @Override - public void handleResponse(PublishWithJoinResponse response) { - // do nothing - } - - @Override - public void handleException(TransportException exp) { - throw new AssertionError("should not be called", exp); - } - - @Override - public String executor() { - return Names.SAME; - } - - @Override - public PublishWithJoinResponse read(StreamInput in) throws IOException { - return new PublishWithJoinResponse(in); - } - }); - - assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); - } - - public void
testGetsDiscoveredNodesWithZeroTimeout() throws InterruptedException { - setupGetDiscoveredNodesAction(); - final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest(); - getDiscoveredNodesRequest.setTimeout(TimeValue.ZERO); - assertWaitConditionMet(getDiscoveredNodesRequest); - } - - public void testGetsDiscoveredNodesByAddress() throws InterruptedException { - setupGetDiscoveredNodesAction(); - final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest(); - getDiscoveredNodesRequest.setRequiredNodes(Arrays.asList(localNode.getAddress().toString(), otherNode.getAddress().toString())); - getDiscoveredNodesRequest.setTimeout(TimeValue.ZERO); - assertWaitConditionMet(getDiscoveredNodesRequest); - } - - public void testGetsDiscoveredNodesByName() throws InterruptedException { - setupGetDiscoveredNodesAction(); - final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest(); - getDiscoveredNodesRequest.setRequiredNodes(Arrays.asList(localNode.getName(), otherNode.getName())); - getDiscoveredNodesRequest.setTimeout(TimeValue.ZERO); - assertWaitConditionMet(getDiscoveredNodesRequest); - } - - public void testGetsDiscoveredNodesByIP() throws InterruptedException { - setupGetDiscoveredNodesAction(); - final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest(); - String ip = localNode.getAddress().getAddress(); - getDiscoveredNodesRequest.setRequiredNodes(Collections.singletonList(ip)); - getDiscoveredNodesRequest.setTimeout(TimeValue.ZERO); - assertWaitConditionFailedOnDuplicate(getDiscoveredNodesRequest, '[' + ip + "] matches ["); - } - - public void testGetsDiscoveredNodesDuplicateName() throws InterruptedException { - setupGetDiscoveredNodesAction(); - final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest(); - String name = localNode.getName(); - getDiscoveredNodesRequest.setRequiredNodes(Arrays.asList(name, name)); - getDiscoveredNodesRequest.setTimeout(TimeValue.ZERO); - assertWaitConditionFailedOnDuplicate(getDiscoveredNodesRequest, "[" + localNode + "] matches [" + name + ", " + name + ']'); - } - - public void testGetsDiscoveredNodesWithDuplicateMatchNameAndAddress() throws InterruptedException { - setupGetDiscoveredNodesAction(); - final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest(); - getDiscoveredNodesRequest.setRequiredNodes(Arrays.asList(localNode.getAddress().toString(), localNode.getName())); - getDiscoveredNodesRequest.setTimeout(TimeValue.ZERO); - assertWaitConditionFailedOnDuplicate(getDiscoveredNodesRequest, "[" + localNode + "] matches ["); - } - - public void testGetsDiscoveredNodesTimeoutOnMissing() throws InterruptedException { - setupGetDiscoveredNodesAction(); - - final CountDownLatch latch = new CountDownLatch(1); - final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest(); - getDiscoveredNodesRequest.setRequiredNodes(Arrays.asList(localNode.getAddress().toString(), "_missing")); - getDiscoveredNodesRequest.setTimeout(TimeValue.ZERO); - transportService.sendRequest(localNode, GetDiscoveredNodesAction.NAME, getDiscoveredNodesRequest, new ResponseHandler() { - @Override - public void handleResponse(GetDiscoveredNodesResponse response) { - throw new AssertionError("should not be called"); - } - - @Override - public void handleException(TransportException exp) { - assertThat(exp.getRootCause(), instanceOf(ElasticsearchTimeoutException.class)); 
- latch.countDown(); - } - }); - - latch.await(10L, TimeUnit.SECONDS); - } - - public void testThrowsExceptionIfDuplicateDiscoveredLater() throws InterruptedException { - new TransportGetDiscoveredNodesAction(Settings.EMPTY, EMPTY_FILTERS, transportService, coordinator); // registers action - transportService.start(); - transportService.acceptIncomingRequests(); - coordinator.start(); - coordinator.startInitialJoin(); - - final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest(); - final String ip = localNode.getAddress().getAddress(); - getDiscoveredNodesRequest.setRequiredNodes(Arrays.asList(ip, "not-a-node")); - - final CountDownLatch countDownLatch = new CountDownLatch(1); - transportService.sendRequest(localNode, GetDiscoveredNodesAction.NAME, getDiscoveredNodesRequest, new ResponseHandler() { - @Override - public void handleResponse(GetDiscoveredNodesResponse response) { - throw new AssertionError("should not be called"); - } - - @Override - public void handleException(TransportException exp) { - Throwable t = exp.getRootCause(); - assertThat(t, instanceOf(IllegalArgumentException.class)); - assertThat(t.getMessage(), startsWith('[' + ip + "] matches [")); - countDownLatch.countDown(); - } - }); - - executeRequestPeersAction(); - assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); - } - - private void executeRequestPeersAction() { - threadPool.generic().execute(() -> - transportService.sendRequest(localNode, REQUEST_PEERS_ACTION_NAME, new PeersRequest(otherNode, emptyList()), - new TransportResponseHandler<PeersResponse>() { - @Override - public PeersResponse read(StreamInput in) throws IOException { - return new PeersResponse(in); - } - - @Override - public void handleResponse(PeersResponse response) { - } - - @Override - public void handleException(TransportException exp) { - } - - @Override - public String executor() { - return Names.SAME; - } - })); - } - - private void setupGetDiscoveredNodesAction() throws InterruptedException { - new TransportGetDiscoveredNodesAction(Settings.EMPTY, EMPTY_FILTERS, transportService, coordinator); // registers action - transportService.start(); - transportService.acceptIncomingRequests(); - coordinator.start(); - coordinator.startInitialJoin(); - - executeRequestPeersAction(); - - final GetDiscoveredNodesRequest getDiscoveredNodesRequest = new GetDiscoveredNodesRequest(); - getDiscoveredNodesRequest.setRequiredNodes(Arrays.asList(localNode.getName(), otherNode.getName())); - assertWaitConditionMet(getDiscoveredNodesRequest); - } - - private void assertWaitConditionMet(GetDiscoveredNodesRequest getDiscoveredNodesRequest) throws InterruptedException { - final CountDownLatch countDownLatch = new CountDownLatch(1); - transportService.sendRequest(localNode, GetDiscoveredNodesAction.NAME, getDiscoveredNodesRequest, new ResponseHandler() { - @Override - public void handleResponse(GetDiscoveredNodesResponse response) { - assertThat(response.getNodes(), containsInAnyOrder(localNode, otherNode)); - countDownLatch.countDown(); - } - - @Override - public void handleException(TransportException exp) { - throw new AssertionError("should not be called", exp); - } - }); - - assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); - } - - private void assertWaitConditionFailedOnDuplicate(GetDiscoveredNodesRequest getDiscoveredNodesRequest, String message) - throws InterruptedException { - final CountDownLatch countDownLatch = new CountDownLatch(1); - transportService.sendRequest(localNode, GetDiscoveredNodesAction.NAME,
getDiscoveredNodesRequest, new ResponseHandler() { - @Override - public void handleResponse(GetDiscoveredNodesResponse response) { - throw new AssertionError("should not be called"); - } - - @Override - public void handleException(TransportException exp) { - Throwable t = exp.getRootCause(); - assertThat(t, instanceOf(IllegalArgumentException.class)); - assertThat(t.getMessage(), startsWith(message)); - countDownLatch.countDown(); - } - }); - - assertTrue(countDownLatch.await(10, TimeUnit.SECONDS)); - } - - private abstract class ResponseHandler implements TransportResponseHandler<GetDiscoveredNodesResponse> { - @Override - public String executor() { - return Names.SAME; - } - - @Override - public GetDiscoveredNodesResponse read(StreamInput in) throws IOException { - return new GetDiscoveredNodesResponse(in); - } - } -} diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterBootstrapServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterBootstrapServiceTests.java index 542247c058861..46a43afa53897 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterBootstrapServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterBootstrapServiceTests.java @@ -20,49 +20,52 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapClusterAction; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapClusterRequest; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapClusterResponse; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapConfiguration.NodeDescription; -import org.elasticsearch.action.admin.cluster.bootstrap.GetDiscoveredNodesAction; -import org.elasticsearch.action.admin.cluster.bootstrap.GetDiscoveredNodesRequest; -import org.elasticsearch.action.admin.cluster.bootstrap.GetDiscoveredNodesResponse; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode.Role; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.Settings.Builder; -import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransport; -import org.elasticsearch.threadpool.ThreadPool.Names; -import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportService; import org.junit.Before; -import java.util.Set; +import java.util.Collections; +import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.Stream; +import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static java.util.Collections.singleton; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.BOOTSTRAP_PLACEHOLDER_PREFIX; import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING; +import static
org.elasticsearch.cluster.coordination.ClusterBootstrapService.UNCONFIGURED_BOOTSTRAP_TIMEOUT_SETTING; import static org.elasticsearch.common.settings.Settings.builder; import static org.elasticsearch.discovery.DiscoveryModule.DISCOVERY_HOSTS_PROVIDER_SETTING; import static org.elasticsearch.discovery.zen.SettingsBasedHostsProvider.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING; import static org.elasticsearch.node.Node.NODE_NAME_SETTING; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.startsWith; public class ClusterBootstrapServiceTests extends ESTestCase { private DiscoveryNode localNode, otherNode1, otherNode2; private DeterministicTaskQueue deterministicTaskQueue; private TransportService transportService; - private ClusterBootstrapService clusterBootstrapService; @Before public void createServices() { @@ -81,10 +84,6 @@ protected void onSendRequest(long requestId, String action, TransportRequest req transportService = transport.createTransportService(Settings.EMPTY, deterministicTaskQueue.getThreadPool(), TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundTransportAddress -> localNode, null, emptySet()); - - clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList(INITIAL_MASTER_NODES_SETTING.getKey(), - localNode.getName(), otherNode1.getName(), otherNode2.getName()).build(), - transportService); } private DiscoveryNode newDiscoveryNode(String nodeName) { @@ -92,152 +91,392 @@ private DiscoveryNode newDiscoveryNode(String nodeName) { Version.CURRENT); } - private void startServices() { - transportService.start(); - transportService.acceptIncomingRequests(); - clusterBootstrapService.start(); - } + public void testBootstrapsAutomaticallyWithDefaultConfiguration() { + final Settings.Builder settings = Settings.builder(); + final long timeout; + if (randomBoolean()) { + timeout = UNCONFIGURED_BOOTSTRAP_TIMEOUT_SETTING.get(Settings.EMPTY).millis(); + } else { + timeout = randomLongBetween(1, 10000); + settings.put(UNCONFIGURED_BOOTSTRAP_TIMEOUT_SETTING.getKey(), timeout + "ms"); + } - public void testDoesNothingOnNonMasterNodes() { - localNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); - transportService.registerRequestHandler(GetDiscoveredNodesAction.NAME, Names.SAME, GetDiscoveredNodesRequest::new, - (request, channel, task) -> { - throw new AssertionError("should not make a discovery request"); - }); + final AtomicReference<Supplier<Iterable<DiscoveryNode>>> discoveredNodesSupplier = new AtomicReference<>(() -> { + throw new AssertionError("should not be called yet"); + }); - startServices(); - deterministicTaskQueue.runAllTasks(); + final AtomicBoolean bootstrapped = new AtomicBoolean(); + ClusterBootstrapService clusterBootstrapService + = new ClusterBootstrapService(settings.build(), transportService, () -> discoveredNodesSupplier.get().get(), + () -> false, vc -> { + assertTrue(bootstrapped.compareAndSet(false, true)); + assertThat(vc.getNodeIds(), + equalTo(Stream.of(localNode, otherNode1, otherNode2).map(DiscoveryNode::getId).collect(Collectors.toSet()))); + assertThat(deterministicTaskQueue.getCurrentTimeMillis(), greaterThanOrEqualTo(timeout)); + }); + +
deterministicTaskQueue.scheduleAt(timeout - 1, + () -> discoveredNodesSupplier.set(() -> Stream.of(localNode, otherNode1, otherNode2).collect(Collectors.toSet()))); + + transportService.start(); + clusterBootstrapService.scheduleUnconfiguredBootstrap(); + deterministicTaskQueue.runAllTasksInTimeOrder(); + assertTrue(bootstrapped.get()); } public void testDoesNothingByDefaultIfHostsProviderConfigured() { - testConfiguredIfSettingSet(builder().putList(DISCOVERY_HOSTS_PROVIDER_SETTING.getKey())); + testDoesNothingWithSettings(builder().putList(DISCOVERY_HOSTS_PROVIDER_SETTING.getKey())); } public void testDoesNothingByDefaultIfUnicastHostsConfigured() { - testConfiguredIfSettingSet(builder().putList(DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey())); + testDoesNothingWithSettings(builder().putList(DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey())); } public void testDoesNothingByDefaultIfMasterNodesConfigured() { - testConfiguredIfSettingSet(builder().putList(INITIAL_MASTER_NODES_SETTING.getKey())); + testDoesNothingWithSettings(builder().putList(INITIAL_MASTER_NODES_SETTING.getKey())); } - private void testConfiguredIfSettingSet(Builder builder) { - clusterBootstrapService = new ClusterBootstrapService(builder.build(), transportService); - transportService.registerRequestHandler(GetDiscoveredNodesAction.NAME, Names.SAME, GetDiscoveredNodesRequest::new, - (request, channel, task) -> { - throw new AssertionError("should not make a discovery request"); - }); - startServices(); + public void testDoesNothingByDefaultOnMasterIneligibleNodes() { + localNode = new DiscoveryNode("local", randomAlphaOfLength(10), buildNewFakeTransportAddress(), emptyMap(), emptySet(), + Version.CURRENT); + testDoesNothingWithSettings(Settings.builder()); + } + + private void testDoesNothingWithSettings(Settings.Builder builder) { + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(builder.build(), transportService, () -> { + throw new AssertionError("should not be called"); + }, () -> false, vc -> { + throw new AssertionError("should not be called"); + }); + transportService.start(); + clusterBootstrapService.scheduleUnconfiguredBootstrap(); deterministicTaskQueue.runAllTasks(); } - public void testBootstrapsAutomaticallyWithDefaultConfiguration() { - clusterBootstrapService = new ClusterBootstrapService(Settings.EMPTY, transportService); + public void testDoesNothingByDefaultIfZen1NodesDiscovered() { + final DiscoveryNode zen1Node = new DiscoveryNode("zen1", buildNewFakeTransportAddress(), singletonMap("zen1", "true"), + singleton(Role.MASTER), Version.CURRENT); + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.EMPTY, transportService, () -> + Stream.of(localNode, zen1Node).collect(Collectors.toSet()), () -> false, vc -> { + throw new AssertionError("should not be called"); + }); + transportService.start(); + clusterBootstrapService.scheduleUnconfiguredBootstrap(); + deterministicTaskQueue.runAllTasks(); + } + + + public void testThrowsExceptionOnDuplicates() { + final IllegalArgumentException illegalArgumentException = expectThrows(IllegalArgumentException.class, () -> { + new ClusterBootstrapService(builder().putList( + INITIAL_MASTER_NODES_SETTING.getKey(), "duplicate-requirement", "duplicate-requirement").build(), + transportService, Collections::emptyList, () -> false, vc -> { + throw new AssertionError("should not be called"); + }); + }); - final Set<DiscoveryNode> discoveredNodes = Stream.of(localNode, otherNode1, otherNode2).collect(Collectors.toSet()); -
transportService.registerRequestHandler(GetDiscoveredNodesAction.NAME, Names.SAME, GetDiscoveredNodesRequest::new, - (request, channel, task) -> channel.sendResponse(new GetDiscoveredNodesResponse(discoveredNodes))); + assertThat(illegalArgumentException.getMessage(), containsString(INITIAL_MASTER_NODES_SETTING.getKey())); + assertThat(illegalArgumentException.getMessage(), containsString("duplicate-requirement")); + } + public void testBootstrapsOnDiscoveryOfAllRequiredNodes() { final AtomicBoolean bootstrapped = new AtomicBoolean(); - transportService.registerRequestHandler(BootstrapClusterAction.NAME, Names.SAME, BootstrapClusterRequest::new, - (request, channel, task) -> { - assertThat(request.getBootstrapConfiguration().getNodeDescriptions().stream() - .map(NodeDescription::getId).collect(Collectors.toSet()), - equalTo(discoveredNodes.stream().map(DiscoveryNode::getId).collect(Collectors.toSet()))); - channel.sendResponse(new BootstrapClusterResponse(randomBoolean())); - assertTrue(bootstrapped.compareAndSet(false, true)); - }); + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList( + INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getName(), otherNode1.getName(), otherNode2.getName()).build(), + transportService, () -> Stream.of(otherNode1, otherNode2).collect(Collectors.toList()), () -> false, vc -> { + assertTrue(bootstrapped.compareAndSet(false, true)); + assertThat(vc.getNodeIds(), containsInAnyOrder(localNode.getId(), otherNode1.getId(), otherNode2.getId())); + assertThat(vc.getNodeIds(), not(hasItem(containsString("placeholder")))); + }); - startServices(); + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); deterministicTaskQueue.runAllTasks(); + assertTrue(bootstrapped.get()); + bootstrapped.set(false); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + assertFalse(bootstrapped.get()); // should only bootstrap once + } + + public void testBootstrapsOnDiscoveryOfTwoOfThreeRequiredNodes() { + final AtomicBoolean bootstrapped = new AtomicBoolean(); + + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList( + INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getName(), otherNode1.getName(), otherNode2.getName()).build(), + transportService, () -> singletonList(otherNode1), () -> false, vc -> { + assertTrue(bootstrapped.compareAndSet(false, true)); + assertThat(vc.getNodeIds(), hasSize(3)); + assertThat(vc.getNodeIds(), hasItem(localNode.getId())); + assertThat(vc.getNodeIds(), hasItem(otherNode1.getId())); + assertThat(vc.getNodeIds(), hasItem(allOf(startsWith(BOOTSTRAP_PLACEHOLDER_PREFIX), containsString(otherNode2.getName())))); + assertTrue(vc.hasQuorum(Stream.of(localNode, otherNode1).map(DiscoveryNode::getId).collect(Collectors.toList()))); + assertFalse(vc.hasQuorum(singletonList(localNode.getId()))); + assertFalse(vc.hasQuorum(singletonList(otherNode1.getId()))); + }); + + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); assertTrue(bootstrapped.get()); + + bootstrapped.set(false); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + assertFalse(bootstrapped.get()); // should only bootstrap once } - public void testDoesNotRetryOnDiscoveryFailure() { - transportService.registerRequestHandler(GetDiscoveredNodesAction.NAME, Names.SAME, GetDiscoveredNodesRequest::new, - new TransportRequestHandler<GetDiscoveredNodesRequest>() { -
private boolean called = false; + public void testBootstrapsOnDiscoveryOfThreeOfFiveRequiredNodes() { + final AtomicBoolean bootstrapped = new AtomicBoolean(); - @Override - public void messageReceived(GetDiscoveredNodesRequest request, TransportChannel channel, Task task) { - assert called == false; - called = true; - throw new IllegalArgumentException("simulate failure of discovery request"); - } - }); + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList( + INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getName(), otherNode1.getName(), otherNode2.getName(), + "missing-node-1", "missing-node-2").build(), + transportService, () -> Stream.of(otherNode1, otherNode2).collect(Collectors.toList()), () -> false, vc -> { + assertTrue(bootstrapped.compareAndSet(false, true)); + assertThat(vc.getNodeIds(), hasSize(5)); + assertThat(vc.getNodeIds(), hasItem(localNode.getId())); + assertThat(vc.getNodeIds(), hasItem(otherNode1.getId())); + assertThat(vc.getNodeIds(), hasItem(otherNode2.getId())); + + final List<String> placeholders + = vc.getNodeIds().stream().filter(ClusterBootstrapService::isBootstrapPlaceholder).collect(Collectors.toList()); + assertThat(placeholders.size(), equalTo(2)); + assertNotEquals(placeholders.get(0), placeholders.get(1)); + assertThat(placeholders, hasItem(containsString("missing-node-1"))); + assertThat(placeholders, hasItem(containsString("missing-node-2"))); + + assertTrue(vc.hasQuorum(Stream.of(localNode, otherNode1, otherNode2).map(DiscoveryNode::getId).collect(Collectors.toList()))); + assertFalse(vc.hasQuorum(Stream.of(localNode, otherNode1).map(DiscoveryNode::getId).collect(Collectors.toList()))); + assertFalse(vc.hasQuorum(Stream.of(localNode, otherNode1).map(DiscoveryNode::getId).collect(Collectors.toList()))); }); - startServices(); + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); deterministicTaskQueue.runAllTasks(); + assertTrue(bootstrapped.get()); + + bootstrapped.set(false); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + assertFalse(bootstrapped.get()); // should only bootstrap once } - public void testBootstrapsOnDiscoverySuccess() { - final AtomicBoolean discoveryAttempted = new AtomicBoolean(); - final Set<DiscoveryNode> discoveredNodes = Stream.of(localNode, otherNode1, otherNode2).collect(Collectors.toSet()); - transportService.registerRequestHandler(GetDiscoveredNodesAction.NAME, Names.SAME, GetDiscoveredNodesRequest::new, - (request, channel, task) -> { - assertTrue(discoveryAttempted.compareAndSet(false, true)); - channel.sendResponse(new GetDiscoveredNodesResponse(discoveredNodes)); - }); + public void testDoesNotBootstrapIfNoNodesDiscovered() { + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList( + INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getName(), otherNode1.getName(), otherNode2.getName()).build(), + transportService, Collections::emptyList, () -> true, vc -> { + throw new AssertionError("should not be called"); + }); - final AtomicBoolean bootstrapAttempted = new AtomicBoolean(); - transportService.registerRequestHandler(BootstrapClusterAction.NAME, Names.SAME, BootstrapClusterRequest::new, - (request, channel, task) -> { - assertTrue(bootstrapAttempted.compareAndSet(false, true)); - channel.sendResponse(new BootstrapClusterResponse(false)); - }); + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + } -
startServices(); + public void testDoesNotBootstrapIfTwoOfFiveNodesDiscovered() { + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList( + INITIAL_MASTER_NODES_SETTING.getKey(), + localNode.getName(), otherNode1.getName(), otherNode2.getName(), "not-a-node-1", "not-a-node-2").build(), + transportService, () -> Stream.of(otherNode1).collect(Collectors.toList()), () -> false, vc -> { + throw new AssertionError("should not be called"); + }); + + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); deterministicTaskQueue.runAllTasks(); + } + + public void testDoesNotBootstrapIfThreeOfSixNodesDiscovered() { + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList( + INITIAL_MASTER_NODES_SETTING.getKey(), + localNode.getName(), otherNode1.getName(), otherNode2.getName(), "not-a-node-1", "not-a-node-2", "not-a-node-3").build(), + transportService, () -> Stream.of(otherNode1, otherNode2).collect(Collectors.toList()), () -> false, vc -> { + throw new AssertionError("should not be called"); + }); - assertTrue(discoveryAttempted.get()); - assertTrue(bootstrapAttempted.get()); + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); } - public void testRetriesOnBootstrapFailure() { - final Set<DiscoveryNode> discoveredNodes = Stream.of(localNode, otherNode1, otherNode2).collect(Collectors.toSet()); - transportService.registerRequestHandler(GetDiscoveredNodesAction.NAME, Names.SAME, GetDiscoveredNodesRequest::new, - (request, channel, task) -> channel.sendResponse(new GetDiscoveredNodesResponse(discoveredNodes))); + public void testDoesNotBootstrapIfAlreadyBootstrapped() { + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList( + INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getName(), otherNode1.getName(), otherNode2.getName()).build(), + transportService, () -> Stream.of(otherNode1, otherNode2).collect(Collectors.toList()), () -> true, vc -> { + throw new AssertionError("should not be called"); + }); - AtomicLong callCount = new AtomicLong(); - transportService.registerRequestHandler(BootstrapClusterAction.NAME, Names.SAME, BootstrapClusterRequest::new, - (request, channel, task) -> { - callCount.incrementAndGet(); - channel.sendResponse(new ElasticsearchException("simulated exception")); - }); + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + } - startServices(); - while (callCount.get() < 5) { - if (deterministicTaskQueue.hasDeferredTasks()) { - deterministicTaskQueue.advanceTime(); - } - deterministicTaskQueue.runAllRunnableTasks(); - } + public void testDoesNotBootstrapsOnNonMasterNode() { + localNode = new DiscoveryNode("local", randomAlphaOfLength(10), buildNewFakeTransportAddress(), emptyMap(), emptySet(), + Version.CURRENT); + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList( + INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getName(), otherNode1.getName(), otherNode2.getName()).build(), + transportService, () -> + Stream.of(localNode, otherNode1, otherNode2).collect(Collectors.toList()), () -> false, vc -> { + throw new AssertionError("should not be called"); + }); + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + } + + public void testDoesNotBootstrapsIfNotConfigured() {
ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService( + Settings.builder().putList(INITIAL_MASTER_NODES_SETTING.getKey()).build(), transportService, + () -> Stream.of(localNode, otherNode1, otherNode2).collect(Collectors.toList()), () -> false, vc -> { + throw new AssertionError("should not be called"); + }); + transportService.start(); + clusterBootstrapService.scheduleUnconfiguredBootstrap(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); } - public void testStopsRetryingBootstrapWhenStopped() { - final Set<DiscoveryNode> discoveredNodes = Stream.of(localNode, otherNode1, otherNode2).collect(Collectors.toSet()); - transportService.registerRequestHandler(GetDiscoveredNodesAction.NAME, Names.SAME, GetDiscoveredNodesRequest::new, - (request, channel, task) -> channel.sendResponse(new GetDiscoveredNodesResponse(discoveredNodes))); + public void testDoesNotBootstrapsIfZen1NodesDiscovered() { + final DiscoveryNode zen1Node = new DiscoveryNode("zen1", buildNewFakeTransportAddress(), singletonMap("zen1", "true"), + singleton(Role.MASTER), Version.CURRENT); - transportService.registerRequestHandler(BootstrapClusterAction.NAME, Names.SAME, BootstrapClusterRequest::new, - (request, channel, task) -> channel.sendResponse(new ElasticsearchException("simulated exception"))); + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList( + INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getName(), otherNode1.getName(), otherNode2.getName()).build(), + transportService, () -> Stream.of(otherNode1, otherNode2, zen1Node).collect(Collectors.toList()), () -> false, vc -> { + throw new AssertionError("should not be called"); + }); - deterministicTaskQueue.scheduleAt(deterministicTaskQueue.getCurrentTimeMillis() + 200000, new Runnable() { - @Override - public void run() { - clusterBootstrapService.stop(); - } + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + } - @Override - public String toString() { - return "stop cluster bootstrap service"; + public void testRetriesBootstrappingOnException() { + + final AtomicLong bootstrappingAttempts = new AtomicLong(); + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList( + INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getName(), otherNode1.getName(), otherNode2.getName()).build(), + transportService, () -> Stream.of(otherNode1, otherNode2).collect(Collectors.toList()), () -> false, vc -> { + bootstrappingAttempts.incrementAndGet(); + if (bootstrappingAttempts.get() < 5L) { + throw new ElasticsearchException("test"); } }); - startServices(); + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + assertThat(bootstrappingAttempts.get(), greaterThanOrEqualTo(5L)); + assertThat(deterministicTaskQueue.getCurrentTimeMillis(), greaterThanOrEqualTo(40000L)); + } + + public void testCancelsBootstrapIfRequirementMatchesMultipleNodes() { + AtomicReference<List<DiscoveryNode>> discoveredNodes + = new AtomicReference<>(Stream.of(otherNode1, otherNode2).collect(Collectors.toList())); + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService( + Settings.builder().putList(INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getAddress().getAddress()).build(), + transportService, discoveredNodes::get, () -> false, vc -> { + throw new AssertionError("should not be called"); + }); + +
transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + + discoveredNodes.set(emptyList()); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + } + + public void testCancelsBootstrapIfNodeMatchesMultipleRequirements() { + AtomicReference> discoveredNodes + = new AtomicReference<>(Stream.of(otherNode1, otherNode2).collect(Collectors.toList())); + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService( + Settings.builder().putList(INITIAL_MASTER_NODES_SETTING.getKey(), otherNode1.getAddress().toString(), otherNode1.getName()) + .build(), + transportService, discoveredNodes::get, () -> false, vc -> { + throw new AssertionError("should not be called"); + }); + + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + + discoveredNodes.set(Stream.of(new DiscoveryNode(otherNode1.getName(), randomAlphaOfLength(10), buildNewFakeTransportAddress(), + emptyMap(), singleton(Role.MASTER), Version.CURRENT), + new DiscoveryNode("yet-another-node", randomAlphaOfLength(10), otherNode1.getAddress(), emptyMap(), singleton(Role.MASTER), + Version.CURRENT)).collect(Collectors.toList())); + + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + } + + public void testMatchesOnNodeName() { + final AtomicBoolean bootstrapped = new AtomicBoolean(); + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService( + Settings.builder().putList(INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getName()).build(), transportService, + Collections::emptyList, () -> false, vc -> assertTrue(bootstrapped.compareAndSet(false, true))); + + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + assertTrue(bootstrapped.get()); + } + + public void testMatchesOnNodeAddress() { + final AtomicBoolean bootstrapped = new AtomicBoolean(); + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService( + Settings.builder().putList(INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getAddress().toString()).build(), transportService, + Collections::emptyList, () -> false, vc -> assertTrue(bootstrapped.compareAndSet(false, true))); + + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + assertTrue(bootstrapped.get()); + } + + public void testMatchesOnNodeHostAddress() { + final AtomicBoolean bootstrapped = new AtomicBoolean(); + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService( + Settings.builder().putList(INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getAddress().getAddress()).build(), + transportService, Collections::emptyList, () -> false, vc -> assertTrue(bootstrapped.compareAndSet(false, true))); + + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + assertTrue(bootstrapped.get()); + } + + public void testDoesNotJustMatchEverything() { + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService( + Settings.builder().putList(INITIAL_MASTER_NODES_SETTING.getKey(), randomAlphaOfLength(10)).build(), transportService, + Collections::emptyList, () -> false, vc -> { + throw new AssertionError("should not be called"); + }); + + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + } + + public 
void testDoesNotIncludeExtraNodes() { + final DiscoveryNode extraNode = newDiscoveryNode("extra-node"); + final AtomicBoolean bootstrapped = new AtomicBoolean(); + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList( + INITIAL_MASTER_NODES_SETTING.getKey(), localNode.getName(), otherNode1.getName(), otherNode2.getName()).build(), + transportService, () -> Stream.of(otherNode1, otherNode2, extraNode).collect(Collectors.toList()), () -> false, + vc -> { + assertTrue(bootstrapped.compareAndSet(false, true)); + assertThat(vc.getNodeIds(), not(hasItem(extraNode.getId()))); + }); + + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); deterministicTaskQueue.runAllTasks(); - // termination means success + assertTrue(bootstrapped.get()); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java index 129b29e1f21e5..cf8e1737a7708 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelperTests.java @@ -38,6 +38,7 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.emptySet; import static java.util.Collections.singletonList; +import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.BOOTSTRAP_PLACEHOLDER_PREFIX; import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING; import static org.elasticsearch.node.Node.NODE_NAME_SETTING; import static org.hamcrest.Matchers.equalTo; @@ -245,6 +246,13 @@ public void testDescriptionAfterBootstrapping() { "discovery will continue using [] from hosts providers and [" + localNode + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0")); + assertThat(new ClusterFormationState(Settings.EMPTY, state(localNode, "n1", "n2", BOOTSTRAP_PLACEHOLDER_PREFIX + "n3"), + emptyList(), emptyList(), 0L).getDescription(), + is("master not discovered or elected yet, an election requires 2 nodes with ids [n1, n2], " + + "have discovered [] which is not a quorum; " + + "discovery will continue using [] from hosts providers and [" + localNode + + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0")); + assertThat(new ClusterFormationState(Settings.EMPTY, state(localNode, "n1", "n2", "n3", "n4"), emptyList(), emptyList(), 0L) .getDescription(), is("master not discovered or elected yet, an election requires at least 3 nodes with ids from [n1, n2, n3, n4], " + @@ -259,6 +267,20 @@ public void testDescriptionAfterBootstrapping() { "discovery will continue using [] from hosts providers and [" + localNode + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0")); + assertThat(new ClusterFormationState(Settings.EMPTY, state(localNode, "n1", "n2", "n3", "n4", BOOTSTRAP_PLACEHOLDER_PREFIX + "n5"), + emptyList(), emptyList(), 0L).getDescription(), + is("master not discovered or elected yet, an election requires at least 3 nodes with ids from [n1, n2, n3, n4], " + + "have discovered [] which is not a quorum; " + + "discovery will continue using [] from hosts providers and [" + localNode + + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0")); + + assertThat(new ClusterFormationState(Settings.EMPTY, 
state(localNode, "n1", "n2", "n3", + BOOTSTRAP_PLACEHOLDER_PREFIX + "n4", BOOTSTRAP_PLACEHOLDER_PREFIX + "n5"), emptyList(), emptyList(), 0L).getDescription(), + is("master not discovered or elected yet, an election requires 3 nodes with ids [n1, n2, n3], " + + "have discovered [] which is not a quorum; " + + "discovery will continue using [] from hosts providers and [" + localNode + + "] from last-known cluster state; node term 0, last-accepted version 0 in term 0")); + assertThat(new ClusterFormationState(Settings.EMPTY, state(localNode, new String[]{"n1"}, new String[]{"n1"}), emptyList(), emptyList(), 0L).getDescription(), is("master not discovered or elected yet, an election requires a node with id [n1], " + diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index 96746f3343e0a..a9ca7d917b9d8 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -26,7 +26,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -48,7 +47,6 @@ import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -82,15 +80,16 @@ import java.util.Optional; import java.util.Set; import java.util.concurrent.Callable; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Supplier; import java.util.function.UnaryOperator; import java.util.stream.Collectors; +import java.util.stream.Stream; import static java.util.Collections.emptySet; +import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.BOOTSTRAP_PLACEHOLDER_PREFIX; import static org.elasticsearch.cluster.coordination.CoordinationStateTests.clusterState; import static org.elasticsearch.cluster.coordination.CoordinationStateTests.setValue; import static org.elasticsearch.cluster.coordination.CoordinationStateTests.value; @@ -125,7 +124,6 @@ import static org.hamcrest.Matchers.hasItem; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; @@ -726,70 +724,6 @@ public void testAckListenerReceivesNacksFromFollowerInHigherTerm() { // assertTrue("expected ack from " + follower1, ackCollector.hasAckedSuccessfully(follower1)); } - public void testDiscoveryOfPeersTriggersNotification() { - final Cluster cluster = new Cluster(randomIntBetween(2, 5)); - - // register a listener and then deregister it again to show that it is not called after deregistration - try (Releasable ignored = 
cluster.getAnyNode().coordinator.withDiscoveryListener(ActionListener.wrap(() -> { - throw new AssertionError("should not be called"); - }))) { - // do nothing - } - - final long startTimeMillis = cluster.deterministicTaskQueue.getCurrentTimeMillis(); - final ClusterNode bootstrapNode = cluster.getAnyNode(); - final AtomicBoolean hasDiscoveredAllPeers = new AtomicBoolean(); - assertFalse(bootstrapNode.coordinator.getFoundPeers().iterator().hasNext()); - try (Releasable ignored = bootstrapNode.coordinator.withDiscoveryListener( - new ActionListener>() { - @Override - public void onResponse(Iterable discoveryNodes) { - int peerCount = 0; - for (final DiscoveryNode discoveryNode : discoveryNodes) { - peerCount++; - } - assertThat(peerCount, lessThan(cluster.size())); - if (peerCount == cluster.size() - 1 && hasDiscoveredAllPeers.get() == false) { - hasDiscoveredAllPeers.set(true); - final long elapsedTimeMillis = cluster.deterministicTaskQueue.getCurrentTimeMillis() - startTimeMillis; - logger.info("--> {} discovered {} peers in {}ms", bootstrapNode.getId(), cluster.size() - 1, elapsedTimeMillis); - assertThat(elapsedTimeMillis, lessThanOrEqualTo(defaultMillis(DISCOVERY_FIND_PEERS_INTERVAL_SETTING) * 2)); - } - } - - @Override - public void onFailure(Exception e) { - throw new AssertionError("unexpected", e); - } - })) { - cluster.runFor(defaultMillis(DISCOVERY_FIND_PEERS_INTERVAL_SETTING) * 2 + randomLongBetween(0, 60000), "discovery phase"); - } - - assertTrue(hasDiscoveredAllPeers.get()); - - final AtomicBoolean receivedAlreadyBootstrappedException = new AtomicBoolean(); - try (Releasable ignored = bootstrapNode.coordinator.withDiscoveryListener( - new ActionListener>() { - @Override - public void onResponse(Iterable discoveryNodes) { - // ignore - } - - @Override - public void onFailure(Exception e) { - if (e instanceof ClusterAlreadyBootstrappedException) { - receivedAlreadyBootstrappedException.set(true); - } else { - throw new AssertionError("unexpected", e); - } - } - })) { - - cluster.stabilise(); - } - assertTrue(receivedAlreadyBootstrappedException.get()); - } - public void testSettingInitialConfigurationTriggersElection() { final Cluster cluster = new Cluster(randomIntBetween(1, 5)); cluster.runFor(defaultMillis(DISCOVERY_FIND_PEERS_INTERVAL_SETTING) * 2 + randomLongBetween(0, 60000), "initial discovery phase"); @@ -1271,12 +1205,8 @@ public String toString() { } } else if (rarely()) { final ClusterNode clusterNode = getAnyNode(); - clusterNode.onNode( - () -> { - logger.debug("----> [runRandomly {}] applying initial configuration {} to {}", - thisStep, initialConfiguration, clusterNode.getId()); - clusterNode.coordinator.setInitialConfiguration(initialConfiguration); - }).run(); + logger.debug("----> [runRandomly {}] applying initial configuration on {}", step, clusterNode.getId()); + clusterNode.applyInitialConfiguration(); } else { if (deterministicTaskQueue.hasDeferredTasks() && randomBoolean()) { deterministicTaskQueue.advanceTime(); @@ -1803,11 +1733,18 @@ ClusterState getLastAppliedClusterState() { void applyInitialConfiguration() { onNode(() -> { + final Set nodeIdsWithPlaceholders = new HashSet<>(initialConfiguration.getNodeIds()); + Stream.generate(() -> BOOTSTRAP_PLACEHOLDER_PREFIX + UUIDs.randomBase64UUID(random())) + .limit((Math.max(initialConfiguration.getNodeIds().size(), 2) - 1) / 2) + .forEach(nodeIdsWithPlaceholders::add); + final VotingConfiguration configurationWithPlaceholders = new VotingConfiguration(new HashSet<>( + 
randomSubsetOf(initialConfiguration.getNodeIds().size(), nodeIdsWithPlaceholders))); try { - coordinator.setInitialConfiguration(initialConfiguration); - logger.info("successfully set initial configuration to {}", initialConfiguration); + coordinator.setInitialConfiguration(configurationWithPlaceholders); + logger.info("successfully set initial configuration to {}", configurationWithPlaceholders); } catch (CoordinationStateRejectedException e) { - logger.info(new ParameterizedMessage("failed to set initial configuration to {}", initialConfiguration), e); + logger.info(new ParameterizedMessage("failed to set initial configuration to {}", + configurationWithPlaceholders), e); } }).run(); } diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsServiceTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotsServiceTests.java index 46523234d1cfa..24f97b67c1458 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsServiceTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotsServiceTests.java @@ -24,7 +24,6 @@ import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.cluster.bootstrap.BootstrapConfiguration; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryAction; import org.elasticsearch.action.admin.cluster.repositories.put.TransportPutRepositoryAction; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotAction; @@ -48,13 +47,14 @@ import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.action.index.NodeMappingRefreshAction; import org.elasticsearch.cluster.action.shard.ShardStateAction; -import org.elasticsearch.cluster.coordination.ClusterBootstrapService; +import org.elasticsearch.cluster.coordination.CoordinationMetaData.VotingConfiguration; import org.elasticsearch.cluster.coordination.CoordinationState; import org.elasticsearch.cluster.coordination.Coordinator; import org.elasticsearch.cluster.coordination.CoordinatorTests; import org.elasticsearch.cluster.coordination.DeterministicTaskQueue; import org.elasticsearch.cluster.coordination.InMemoryPersistedState; import org.elasticsearch.cluster.coordination.MockSinglePrioritizingExecutor; +import org.elasticsearch.cluster.coordination.ClusterBootstrapService; import org.elasticsearch.cluster.metadata.AliasValidator; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -254,14 +254,10 @@ private void startCluster() { deterministicTaskQueue.advanceTime(); deterministicTaskQueue.runAllRunnableTasks(); - final BootstrapConfiguration bootstrapConfiguration = new BootstrapConfiguration( - testClusterNodes.nodes.values().stream().filter(n -> n.node.isMasterNode()) - .map(node -> new BootstrapConfiguration.NodeDescription(node.node)) - .distinct() - .collect(Collectors.toList())); + final VotingConfiguration votingConfiguration = new VotingConfiguration(testClusterNodes.nodes.values().stream().map(n -> n.node) + .filter(DiscoveryNode::isMasterNode).map(DiscoveryNode::getId).collect(Collectors.toSet())); testClusterNodes.nodes.values().stream().filter(n -> n.node.isMasterNode()).forEach( - testClusterNode -> testClusterNode.coordinator.setInitialConfiguration(bootstrapConfiguration) - ); + testClusterNode -> testClusterNode.coordinator.setInitialConfiguration(votingConfiguration)); runUntil( () -> { From 
757932a9751757c9c3689032fc4190dbfbe831a6 Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Tue, 22 Jan 2019 12:04:29 +0100 Subject: [PATCH 03/39] Document that date math is locale independent With this commit we add a note to the API conventions documentation that all date math expressions are resolved independently of any locale. This behavior might be puzzling to users who try to specify a calendar other than the Gregorian calendar. Closes #37330 Relates #37663 --- docs/reference/api-conventions.asciidoc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc index be41c0fdc77e5..96a01bbeb5d9e 100644 --- a/docs/reference/api-conventions.asciidoc +++ b/docs/reference/api-conventions.asciidoc @@ -85,6 +85,9 @@ Where: `date_format`:: is the optional format in which the computed date should be rendered. Defaults to `YYYY.MM.dd`. `time_zone`:: is the optional time zone . Defaults to `utc`. +Date math expressions are resolved independently of any locale. Consequently, it is not possible to use +any calendar other than the Gregorian calendar. + You must enclose date math index name expressions within angle brackets, and all special characters should be URI encoded. For example: From 0a93a0358bfed106bf397a24c0590008225ad21f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Tue, 22 Jan 2019 12:12:21 +0100 Subject: [PATCH 04/39] Remove deprecated FieldNamesFieldMapper.Builder#index (#37305) The method calls "enabled" in addition to what super.index() does, but this seems to be done explicitly now in the TypeParsers `parse` method. The removed method has been deprecated since at least 6.0. Also makes some of the Builder's methods and constructors private since they are only used internally in this class. --- .../index/mapper/FieldNamesFieldMapper.java | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java index 79b2b0c4c67d1..32c44fd5f55a0 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/FieldNamesFieldMapper.java @@ -73,21 +73,14 @@ public static class Defaults { } } - public static class Builder extends MetadataFieldMapper.Builder<Builder, FieldNamesFieldMapper> { + private static class Builder extends MetadataFieldMapper.Builder<Builder, FieldNamesFieldMapper> { private boolean enabled = Defaults.ENABLED; - public Builder(MappedFieldType existing) { + private Builder(MappedFieldType existing) { super(Defaults.NAME, existing == null ? Defaults.FIELD_TYPE : existing, Defaults.FIELD_TYPE); } - @Override - @Deprecated - public Builder index(boolean index) { - enabled(index); - return super.index(index); - } - - public Builder enabled(boolean enabled) { + private Builder enabled(boolean enabled) { + this.enabled = enabled; return this; } From ef2f5e4a136fff487e3c6f30656d3e222d74ac25 Mon Sep 17 00:00:00 2001 From: Martijn van Groningen Date: Tue, 22 Jan 2019 12:48:05 +0100 Subject: [PATCH 05/39] Follow stats API should return a 404 when requesting stats for a non-existent index (#37220) Currently it returns an empty response with a 200 response code.
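For illustration, the intended client-visible behavior is roughly the sketch below (not part of the change itself; it assumes a RestHighLevelClient named `client`, and the follower index name `no-such-follower` is hypothetical):

    // Requesting follow stats for an index that has no shard follow tasks
    // should now surface a 404 instead of an empty 200 response.
    FollowStatsRequest request = new FollowStatsRequest("no-such-follower");
    try {
        client.ccr().getFollowStats(request, RequestOptions.DEFAULT);
    } catch (ElasticsearchStatusException e) {
        assert e.status() == RestStatus.NOT_FOUND;
    }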
Closes #37021 --- .../documentation/CCRDocumentationIT.java | 22 ++++ .../rest-api-spec/test/ccr/follow_stats.yml | 10 ++ .../action/TransportFollowStatsAction.java | 45 +++++--- .../xpack/ccr/FollowStatsIT.java | 105 ++++++++++++++++++ .../TransportFollowStatsActionTests.java | 66 +++++++++++ 5 files changed, 233 insertions(+), 15 deletions(-) create mode 100644 x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsActionTests.java diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java index b05c7a0dde368..1f6373aff6a8c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/CCRDocumentationIT.java @@ -630,6 +630,22 @@ public void onFailure(Exception e) { public void testGetFollowStats() throws Exception { RestHighLevelClient client = highLevelClient(); + { + // Create leader index: + CreateIndexRequest createIndexRequest = new CreateIndexRequest("leader"); + createIndexRequest.settings(Collections.singletonMap("index.soft_deletes.enabled", true)); + CreateIndexResponse response = client.indices().create(createIndexRequest, RequestOptions.DEFAULT); + assertThat(response.isAcknowledged(), is(true)); + } + { + // Follow index, so that we can query for follow stats: + PutFollowRequest putFollowRequest = new PutFollowRequest("local", "leader", "follower"); + PutFollowResponse putFollowResponse = client.ccr().putFollow(putFollowRequest, RequestOptions.DEFAULT); + assertThat(putFollowResponse.isFollowIndexCreated(), is(true)); + assertThat(putFollowResponse.isFollowIndexShardsAcked(), is(true)); + assertThat(putFollowResponse.isIndexFollowingStarted(), is(true)); + } + // tag::ccr-get-follow-stats-request FollowStatsRequest request = new FollowStatsRequest("follower"); // <1> @@ -671,6 +687,12 @@ public void onFailure(Exception e) { // end::ccr-get-follow-stats-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); + + { + PauseFollowRequest pauseFollowRequest = new PauseFollowRequest("follower"); + AcknowledgedResponse pauseFollowResponse = client.ccr().pauseFollow(pauseFollowRequest, RequestOptions.DEFAULT); + assertThat(pauseFollowResponse.isAcknowledged(), is(true)); + } } static Map toMap(Response response) throws IOException { diff --git a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_stats.yml b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_stats.yml index aa63c804aba21..5b3e6c18ef29b 100644 --- a/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_stats.yml +++ b/x-pack/plugin/ccr/qa/rest/src/test/resources/rest-api-spec/test/ccr/follow_stats.yml @@ -43,6 +43,12 @@ - is_true: follow_index_shards_acked - is_true: index_following_started + - do: + ccr.follow_stats: + index: _all + - length: { indices: 1 } + - match: { indices.0.index: "bar" } + # we can not reliably wait for replication to occur so we test the endpoint without indexing any documents - do: ccr.follow_stats: @@ -77,3 +83,7 @@ index: bar - is_true: acknowledged + - do: + catch: missing + ccr.follow_stats: + index: unknown diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsAction.java 
b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsAction.java index 8ab66aec8e80b..dc684fbc904ce 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsAction.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.ccr.action; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; @@ -13,6 +14,7 @@ import org.elasticsearch.action.support.tasks.TransportTasksAction; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; @@ -65,6 +67,15 @@ protected void doExecute( listener.onFailure(LicenseUtils.newComplianceException("ccr")); return; } + + if (Strings.isAllOrWildcard(request.indices()) == false) { + final ClusterState state = clusterService.state(); + Set shardFollowTaskFollowerIndices = findFollowerIndicesFromShardFollowTasks(state, request.indices()); + if (shardFollowTaskFollowerIndices.isEmpty()) { + String resources = String.join(",", request.indices()); + throw new ResourceNotFoundException("No shard follow tasks for follower indices [{}]", resources); + } + } super.doExecute(task, request, listener); } @@ -80,21 +91,7 @@ protected FollowStatsAction.StatsResponses newResponse( @Override protected void processTasks(final FollowStatsAction.StatsRequest request, final Consumer operation) { final ClusterState state = clusterService.state(); - final PersistentTasksCustomMetaData persistentTasksMetaData = state.metaData().custom(PersistentTasksCustomMetaData.TYPE); - if (persistentTasksMetaData == null) { - return; - } - - final Set requestedFollowerIndices = request.indices() != null ? - new HashSet<>(Arrays.asList(request.indices())) : Collections.emptySet(); - final Set followerIndices = persistentTasksMetaData.tasks().stream() - .filter(persistentTask -> persistentTask.getTaskName().equals(ShardFollowTask.NAME)) - .map(persistentTask -> { - ShardFollowTask shardFollowTask = (ShardFollowTask) persistentTask.getParams(); - return shardFollowTask.getFollowShardId().getIndexName(); - }) - .filter(followerIndex -> requestedFollowerIndices.isEmpty() || requestedFollowerIndices.contains(followerIndex)) - .collect(Collectors.toSet()); + final Set followerIndices = findFollowerIndicesFromShardFollowTasks(state, request.indices()); for (final Task task : taskManager.getTasks().values()) { if (task instanceof ShardFollowNodeTask) { @@ -114,4 +111,22 @@ protected void taskOperation( listener.onResponse(new FollowStatsAction.StatsResponse(task.getStatus())); } + static Set findFollowerIndicesFromShardFollowTasks(ClusterState state, String[] indices) { + final PersistentTasksCustomMetaData persistentTasksMetaData = state.metaData().custom(PersistentTasksCustomMetaData.TYPE); + if (persistentTasksMetaData == null) { + return Collections.emptySet(); + } + + final Set requestedFollowerIndices = indices != null ? 
+ new HashSet<>(Arrays.asList(indices)) : Collections.emptySet(); + return persistentTasksMetaData.tasks().stream() + .filter(persistentTask -> persistentTask.getTaskName().equals(ShardFollowTask.NAME)) + .map(persistentTask -> { + ShardFollowTask shardFollowTask = (ShardFollowTask) persistentTask.getParams(); + return shardFollowTask.getFollowShardId().getIndexName(); + }) + .filter(followerIndex -> Strings.isAllOrWildcard(indices) || requestedFollowerIndices.contains(followerIndex)) + .collect(Collectors.toSet()); + } + } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowStatsIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowStatsIT.java index 409746f9d851b..1f1c6cd5c64e3 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowStatsIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/FollowStatsIT.java @@ -6,9 +6,12 @@ package org.elasticsearch.xpack.ccr; +import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.admin.indices.close.CloseIndexRequest; +import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; @@ -116,4 +119,106 @@ public void testFollowStatsApiFollowerIndexFiltering() throws Exception { }); } + public void testFollowStatsApiResourceNotFound() throws Exception { + FollowStatsAction.StatsRequest statsRequest = new FollowStatsAction.StatsRequest(); + FollowStatsAction.StatsResponses response = client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet(); + assertThat(response.getStatsResponses().size(), equalTo(0)); + + statsRequest.setIndices(new String[] {"follower1"}); + Exception e = expectThrows(ResourceNotFoundException.class, + () -> client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet()); + assertThat(e.getMessage(), equalTo("No shard follow tasks for follower indices [follower1]")); + + final String leaderIndexSettings = getIndexSettings(1, 0, + singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + assertAcked(client().admin().indices().prepareCreate("leader1").setSource(leaderIndexSettings, XContentType.JSON)); + ensureGreen("leader1"); + + PutFollowAction.Request followRequest = getPutFollowRequest("leader1", "follower1"); + client().execute(PutFollowAction.INSTANCE, followRequest).get(); + + response = client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet(); + assertThat(response.getStatsResponses().size(), equalTo(1)); + assertThat(response.getStatsResponses().get(0).status().followerIndex(), equalTo("follower1")); + + statsRequest.setIndices(new String[] {"follower2"}); + e = expectThrows(ResourceNotFoundException.class, + () -> client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet()); + assertThat(e.getMessage(), equalTo("No shard follow tasks for follower indices [follower2]")); + + assertAcked(client().execute(PauseFollowAction.INSTANCE, new PauseFollowAction.Request("follower1")).actionGet()); + } + + public void testFollowStatsApiIncludeShardFollowStatsWithRemovedFollowerIndex() throws Exception { + final String leaderIndexSettings = getIndexSettings(1, 0, + 
singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + assertAcked(client().admin().indices().prepareCreate("leader1").setSource(leaderIndexSettings, XContentType.JSON)); + ensureGreen("leader1"); + + PutFollowAction.Request followRequest = getPutFollowRequest("leader1", "follower1"); + client().execute(PutFollowAction.INSTANCE, followRequest).get(); + + FollowStatsAction.StatsRequest statsRequest = new FollowStatsAction.StatsRequest(); + FollowStatsAction.StatsResponses response = client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet(); + assertThat(response.getStatsResponses().size(), equalTo(1)); + assertThat(response.getStatsResponses().get(0).status().followerIndex(), equalTo("follower1")); + + statsRequest = new FollowStatsAction.StatsRequest(); + statsRequest.setIndices(new String[] {"follower1"}); + response = client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet(); + assertThat(response.getStatsResponses().size(), equalTo(1)); + assertThat(response.getStatsResponses().get(0).status().followerIndex(), equalTo("follower1")); + + assertAcked(client().admin().indices().delete(new DeleteIndexRequest("follower1")).actionGet()); + + statsRequest = new FollowStatsAction.StatsRequest(); + response = client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet(); + assertThat(response.getStatsResponses().size(), equalTo(1)); + assertThat(response.getStatsResponses().get(0).status().followerIndex(), equalTo("follower1")); + + statsRequest = new FollowStatsAction.StatsRequest(); + statsRequest.setIndices(new String[] {"follower1"}); + response = client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet(); + assertThat(response.getStatsResponses().size(), equalTo(1)); + assertThat(response.getStatsResponses().get(0).status().followerIndex(), equalTo("follower1")); + + assertAcked(client().execute(PauseFollowAction.INSTANCE, new PauseFollowAction.Request("follower1")).actionGet()); + } + + public void testFollowStatsApiIncludeShardFollowStatsWithClosedFollowerIndex() throws Exception { + final String leaderIndexSettings = getIndexSettings(1, 0, + singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true")); + assertAcked(client().admin().indices().prepareCreate("leader1").setSource(leaderIndexSettings, XContentType.JSON)); + ensureGreen("leader1"); + + PutFollowAction.Request followRequest = getPutFollowRequest("leader1", "follower1"); + client().execute(PutFollowAction.INSTANCE, followRequest).get(); + + FollowStatsAction.StatsRequest statsRequest = new FollowStatsAction.StatsRequest(); + FollowStatsAction.StatsResponses response = client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet(); + assertThat(response.getStatsResponses().size(), equalTo(1)); + assertThat(response.getStatsResponses().get(0).status().followerIndex(), equalTo("follower1")); + + statsRequest = new FollowStatsAction.StatsRequest(); + statsRequest.setIndices(new String[] {"follower1"}); + response = client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet(); + assertThat(response.getStatsResponses().size(), equalTo(1)); + assertThat(response.getStatsResponses().get(0).status().followerIndex(), equalTo("follower1")); + + assertAcked(client().admin().indices().close(new CloseIndexRequest("follower1")).actionGet()); + + statsRequest = new FollowStatsAction.StatsRequest(); + response = client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet(); + assertThat(response.getStatsResponses().size(), equalTo(1)); + 
assertThat(response.getStatsResponses().get(0).status().followerIndex(), equalTo("follower1")); + + statsRequest = new FollowStatsAction.StatsRequest(); + statsRequest.setIndices(new String[] {"follower1"}); + response = client().execute(FollowStatsAction.INSTANCE, statsRequest).actionGet(); + assertThat(response.getStatsResponses().size(), equalTo(1)); + assertThat(response.getStatsResponses().get(0).status().followerIndex(), equalTo("follower1")); + + assertAcked(client().execute(PauseFollowAction.INSTANCE, new PauseFollowAction.Request("follower1")).actionGet()); + } + } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsActionTests.java new file mode 100644 index 0000000000000..bc8c58f1de7de --- /dev/null +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsActionTests.java @@ -0,0 +1,66 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.ccr.action; + +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.test.ESTestCase; + +import java.util.Collections; +import java.util.Set; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class TransportFollowStatsActionTests extends ESTestCase { + + public void testFindFollowerIndicesFromShardFollowTasks() { + PersistentTasksCustomMetaData.Builder persistentTasks = PersistentTasksCustomMetaData.builder() + .addTask("1", ShardFollowTask.NAME, createShardFollowTask("abc"), null) + .addTask("2", ShardFollowTask.NAME, createShardFollowTask("def"), null); + + ClusterState clusterState = ClusterState.builder(new ClusterName("_cluster")) + .metaData(MetaData.builder().putCustom(PersistentTasksCustomMetaData.TYPE, persistentTasks.build()).build()) + .build(); + Set result = TransportFollowStatsAction.findFollowerIndicesFromShardFollowTasks(clusterState, null); + assertThat(result.size(), equalTo(2)); + assertThat(result.contains("abc"), is(true)); + assertThat(result.contains("def"), is(true)); + + result = TransportFollowStatsAction.findFollowerIndicesFromShardFollowTasks(clusterState, new String[]{"def"}); + assertThat(result.size(), equalTo(1)); + assertThat(result.contains("def"), is(true)); + + result = TransportFollowStatsAction.findFollowerIndicesFromShardFollowTasks(clusterState, new String[]{"ghi"}); + assertThat(result.size(), equalTo(0)); + } + + private static ShardFollowTask createShardFollowTask(String followerIndex) { + return new ShardFollowTask( + null, + new ShardId(followerIndex, "", 0), + new ShardId("leader_index", "", 0), + 1024, + TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, + 1, + 1024, + TransportResumeFollowAction.DEFAULT_MAX_READ_REQUEST_SIZE, + 1, + 10240, + new ByteSizeValue(512, ByteSizeUnit.MB), + TimeValue.timeValueMillis(10), + TimeValue.timeValueMillis(10), + 
Collections.emptyMap() + ); + } + +} From 2a7b7ccf1c6c1503a2a1c5aa5a13dfa9b122625b Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Tue, 22 Jan 2019 12:51:03 +0100 Subject: [PATCH 06/39] Use cancel instead of timeout for aborting publications (#37670) When a publication was cancelled because a node turned to follower or candidate, it would still show up as timed out, which can be confusing in the logs. This change replaces the improper call to onTimeout with a more general cancel method. --- .../cluster/coordination/Coordinator.java | 10 +++--- .../cluster/coordination/Publication.java | 32 +++++++++---------- .../coordination/PublicationTests.java | 4 +-- 3 files changed, 23 insertions(+), 23 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 968a9a5f01f7a..084d5cf38f2db 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -446,7 +446,7 @@ void becomeCandidate(String method) { if (mode != Mode.CANDIDATE) { mode = Mode.CANDIDATE; - cancelActivePublication(); + cancelActivePublication("become candidate: " + method); joinAccumulator.close(mode); joinAccumulator = joinHelper.new CandidateJoinAccumulator(); @@ -518,7 +518,7 @@ void becomeFollower(String method, DiscoveryNode leaderNode) { discoveryUpgradeService.deactivate(); clusterFormationFailureHelper.stop(); closePrevotingAndElectionScheduler(); - cancelActivePublication(); + cancelActivePublication("become follower: " + method); preVoteCollector.update(getPreVoteResponse(), leaderNode); if (restartLeaderChecker) { @@ -902,7 +902,7 @@ assert getLocalNode().equals(clusterState.getNodes().get(getLocalNode().getId()) @Override public void run() { synchronized (mutex) { - publication.onTimeout(); + publication.cancel("timed out after " + publishTimeout); } } @@ -958,10 +958,10 @@ public void onFailure(Exception e) { }; } - private void cancelActivePublication() { + private void cancelActivePublication(String reason) { assert Thread.holdsLock(mutex) : "Coordinator mutex not held"; if (currentPublication.isPresent()) { - currentPublication.get().onTimeout(); + currentPublication.get().cancel(reason); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java index 9ec8d562b81ba..6838c2f996ffc 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java @@ -49,7 +49,7 @@ public abstract class Publication { private Optional<ApplyCommitRequest> applyCommitRequest; // set when state is committed private boolean isCompleted; // set when publication is completed - private boolean timedOut; // set when publication timed out + private boolean cancelled; // set when publication is cancelled public Publication(PublishRequest publishRequest, AckListener ackListener, LongSupplier currentTimeSupplier) { this.publishRequest = publishRequest; @@ -71,17 +71,17 @@ public void start(Set<DiscoveryNode> faultyNodes) { publicationTargets.forEach(PublicationTarget::sendPublishRequest); } - public void onTimeout() { + public void cancel(String reason) { if (isCompleted) { return; } - assert timedOut == false; - timedOut = true; + assert cancelled == false; + cancelled = true; if (applyCommitRequest.isPresent() == false) { -
logger.debug("onTimeout: [{}] timed out before committing", this); + logger.debug("cancel: [{}] cancelled before committing (reason: {})", this, reason); // fail all current publications - final Exception e = new ElasticsearchException("publication timed out before committing"); + final Exception e = new ElasticsearchException("publication cancelled before committing: " + reason); publicationTargets.stream().filter(PublicationTarget::isActive).forEach(pt -> pt.setFailed(e)); } onPossibleCompletion(); @@ -101,7 +101,7 @@ private void onPossibleCompletion() { return; } - if (timedOut == false) { + if (cancelled == false) { for (final PublicationTarget target : publicationTargets) { if (target.isActive()) { return; @@ -125,8 +125,8 @@ private void onPossibleCompletion() { } // For assertions only: verify that this invariant holds - private boolean publicationCompletedIffAllTargetsInactiveOrTimedOut() { - if (timedOut == false) { + private boolean publicationCompletedIffAllTargetsInactiveOrCancelled() { + if (cancelled == false) { for (final PublicationTarget target : publicationTargets) { if (target.isActive()) { return isCompleted == false; @@ -222,7 +222,7 @@ void sendPublishRequest() { state = PublicationTargetState.SENT_PUBLISH_REQUEST; Publication.this.sendPublishRequest(discoveryNode, publishRequest, new PublishResponseHandler()); // TODO Can this ^ fail with an exception? Target should be failed if so. - assert publicationCompletedIffAllTargetsInactiveOrTimedOut(); + assert publicationCompletedIffAllTargetsInactiveOrCancelled(); } void handlePublishResponse(PublishResponse publishResponse) { @@ -245,7 +245,7 @@ void sendApplyCommit() { state = PublicationTargetState.SENT_APPLY_COMMIT; assert applyCommitRequest.isPresent(); Publication.this.sendApplyCommit(discoveryNode, applyCommitRequest.get(), new ApplyCommitResponseHandler()); - assert publicationCompletedIffAllTargetsInactiveOrTimedOut(); + assert publicationCompletedIffAllTargetsInactiveOrCancelled(); } void setAppliedCommit() { @@ -300,7 +300,7 @@ private class PublishResponseHandler implements ActionListener { if (e.getKey().equals(n2)) { if (timeOut) { - publication.onTimeout(); + publication.cancel("timed out"); } else { e.getValue().onFailure(new TransportException(new Exception("dummy failure"))); } @@ -407,7 +407,7 @@ public void testClusterStatePublishingTimesOutAfterCommit() throws InterruptedEx } }); - publication.onTimeout(); + publication.cancel("timed out"); assertTrue(publication.completed); assertTrue(publication.committed); assertEquals(committingNodes, ackListener.await(0L, TimeUnit.SECONDS)); From 228611843c6d165087c2d957df57b94b9d93a143 Mon Sep 17 00:00:00 2001 From: Henning Andersen <33268011+henningandersen@users.noreply.github.com> Date: Tue, 22 Jan 2019 13:27:12 +0100 Subject: [PATCH 07/39] Fail start of non-data node if node has data (#37347) * Fail start of non-data node if node has data Fail the start of a node configured with node.data=false if it holds shard data, to avoid (old) indices being resurrected into the cluster in red status.
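The essence of the new guard can be read as the standalone sketch below (a simplification; the actual check lives in NodeEnvironment#ensureNoShardData and walks every configured data path). A shard directory is an all-digit child of an index directory, e.g. indices/<indexUUID>/0:

    import java.io.IOException;
    import java.nio.file.DirectoryStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.ArrayList;
    import java.util.List;

    class ShardDataCheck {
        // Collects paths that look like shard data; a non-empty result means a
        // node started with node.data=false must refuse to start.
        static List<Path> findShardDataPaths(Path indicesPath) throws IOException {
            List<Path> shardDataPaths = new ArrayList<>();
            if (Files.isDirectory(indicesPath)) {
                try (DirectoryStream<Path> indexStream = Files.newDirectoryStream(indicesPath)) {
                    for (Path indexPath : indexStream) {
                        if (Files.isDirectory(indexPath) == false) {
                            continue;
                        }
                        try (DirectoryStream<Path> shardStream = Files.newDirectoryStream(indexPath)) {
                            for (Path shardPath : shardStream) {
                                String name = shardPath.getFileName().toString();
                                if (Files.isDirectory(shardPath) && name.chars().allMatch(Character::isDigit)) {
                                    shardDataPaths.add(shardPath.toAbsolutePath());
                                }
                            }
                        }
                    }
                }
            }
            return shardDataPaths;
        }
    }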
Issue #27073 --- .../elasticsearch/env/NodeEnvironment.java | 39 +++++++++++ .../elasticsearch/env/NodeEnvironmentIT.java | 67 +++++++++++++++++++ .../env/NodeEnvironmentTests.java | 43 ++++++++++++ .../test/InternalTestCluster.java | 12 +++- 4 files changed, 159 insertions(+), 2 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java diff --git a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java index 2d236b08f79de..2f676eb846770 100644 --- a/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/server/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -59,6 +59,7 @@ import org.elasticsearch.monitor.fs.FsInfo; import org.elasticsearch.monitor.fs.FsProbe; import org.elasticsearch.monitor.jvm.JvmInfo; +import org.elasticsearch.node.Node; import java.io.Closeable; import java.io.IOException; @@ -309,6 +310,11 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce if (DiscoveryNode.isMasterNode(settings) || DiscoveryNode.isDataNode(settings)) { ensureAtomicMoveSupported(nodePaths); } + + if (DiscoveryNode.isDataNode(settings) == false) { + ensureNoShardData(nodePaths); + } + success = true; } finally { if (success == false) { @@ -1030,6 +1036,38 @@ private static void ensureAtomicMoveSupported(final NodePath[] nodePaths) throws } } + private void ensureNoShardData(final NodePath[] nodePaths) throws IOException { + List shardDataPaths = new ArrayList<>(); + for (NodePath nodePath : nodePaths) { + Path indicesPath = nodePath.indicesPath; + if (Files.isDirectory(indicesPath)) { + try (DirectoryStream indexStream = Files.newDirectoryStream(indicesPath)) { + for (Path indexPath : indexStream) { + if (Files.isDirectory(indexPath)) { + try (Stream shardStream = Files.list(indexPath)) { + shardStream.filter(this::isShardPath) + .map(Path::toAbsolutePath) + .forEach(shardDataPaths::add); + } + } + } + } + } + } + + if (shardDataPaths.isEmpty() == false) { + throw new IllegalStateException("Node is started with " + + Node.NODE_DATA_SETTING.getKey() + + "=false, but has shard data: " + + shardDataPaths); + } + } + + private boolean isShardPath(Path path) { + return Files.isDirectory(path) + && path.getFileName().toString().chars().allMatch(Character::isDigit); + } + /** * Resolve the custom path for a index's shard. * Uses the {@code IndexMetaData.SETTING_DATA_PATH} setting to determine @@ -1140,3 +1178,4 @@ private static void tryWriteTempFile(Path path) throws IOException { } } } + diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java new file mode 100644 index 0000000000000..bdca86858701f --- /dev/null +++ b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentIT.java @@ -0,0 +1,67 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.env; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.node.Node; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalTestCluster; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.startsWith; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0) +public class NodeEnvironmentIT extends ESIntegTestCase { + public void testStartFailureOnDataForNonDataNode() throws Exception { + final String indexName = "test-fail-on-data"; + + logger.info("--> starting one node"); + internalCluster().startNode(); + + logger.info("--> creating index"); + prepareCreate(indexName, Settings.builder() + .put("index.number_of_shards", 1) + .put("index.number_of_replicas", 0) + ).get(); + final String indexUUID = resolveIndex(indexName).getUUID(); + + logger.info("--> indexing a simple document"); + client().prepareIndex(indexName, "type1", "1").setSource("field1", "value1").get(); + + logger.info("--> restarting the node with node.data=true"); + internalCluster().restartRandomDataNode(); + + logger.info("--> restarting the node with node.data=false"); + IllegalStateException ex = expectThrows(IllegalStateException.class, + "Node started with node.data=false and existing shard data must fail", + () -> + internalCluster().restartRandomDataNode(new InternalTestCluster.RestartCallback() { + @Override + public Settings onNodeStopped(String nodeName) { + return Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), false).build(); + } + })); + assertThat(ex.getMessage(), containsString(indexUUID)); + assertThat(ex.getMessage(), + startsWith("Node is started with " + + Node.NODE_DATA_SETTING.getKey() + + "=false, but has shard data")); + } +} diff --git a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java index 55ab02c1dcfb9..2771bc9f243ac 100644 --- a/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java +++ b/server/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.node.Node; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; @@ -52,6 +53,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.startsWith; @LuceneTestCase.SuppressFileSystems("ExtrasFS") // TODO: fix test to allow extras public class NodeEnvironmentTests extends ESTestCase { @@ -470,6 +472,47 @@ public void testExistingTempFiles() throws IOException { } } + public void testEnsureNoShardData() throws IOException { + Settings settings = buildEnvSettings(Settings.EMPTY); + Index index = new Index("test", "testUUID"); + + try (NodeEnvironment env = newNodeEnvironment(settings)) { + for (Path path : env.indexPaths(index)) 
{ + Files.createDirectories(path.resolve(MetaDataStateFormat.STATE_DIR_NAME)); + } + } + + // build settings using same path.data as original but with node.data=false + Settings noDataSettings = Settings.builder() + .put(settings) + .put(Node.NODE_DATA_SETTING.getKey(), false).build(); + + String shardDataDirName = Integer.toString(randomInt(10)); + Path shardPath; + + // test that we can create data=false env with only meta information + try (NodeEnvironment env = newNodeEnvironment(noDataSettings)) { + for (Path path : env.indexPaths(index)) { + Files.createDirectories(path.resolve(shardDataDirName)); + } + shardPath = env.indexPaths(index)[0]; + } + + IllegalStateException ex = expectThrows(IllegalStateException.class, + "Must fail creating NodeEnvironment on a data path that has shard data if node.data=false", + () -> newNodeEnvironment(noDataSettings).close()); + + assertThat(ex.getMessage(), + containsString(shardPath.resolve(shardDataDirName).toAbsolutePath().toString())); + assertThat(ex.getMessage(), + startsWith("Node is started with " + + Node.NODE_DATA_SETTING.getKey() + + "=false, but has shard data")); + + // test that we can create data=true env + newNodeEnvironment(settings).close(); + } + /** Converts an array of Strings to an array of Paths, adding an additional child if specified */ private Path[] stringsToPaths(String[] strings, String additional) { Path[] locations = new Path[strings.length]; diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 012c574f9e6aa..55e356d093e6e 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -1727,8 +1727,16 @@ private void restartNode(NodeAndClient nodeAndClient, RestartCallback callback) removeExclusions(excludedNodeIds); - nodeAndClient.recreateNode(newSettings, () -> rebuildUnicastHostFiles(emptyList())); - nodeAndClient.startNode(); + boolean success = false; + try { + nodeAndClient.recreateNode(newSettings, () -> rebuildUnicastHostFiles(emptyList())); + nodeAndClient.startNode(); + success = true; + } finally { + if (success == false) + nodes.remove(nodeAndClient.name); + } + if (activeDisruptionScheme != null) { activeDisruptionScheme.applyToNode(nodeAndClient.name, this); } From 3fad1eeaed45761d734ece7a1c717dc6c6e6ccd2 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Tue, 22 Jan 2019 12:44:45 +0000 Subject: [PATCH 08/39] Un-assign persistent tasks as nodes exit the cluster (#37656) PersistentTasksClusterService decides whether a task should be reassigned by checking that there is a node in the cluster with the same id. If a node is restarted, PersistentTasksClusterService may not observe the change and may decide that the task still has a valid assignment, because the node's ephemeral id is not used in that decision. This change un-assigns tasks as the nodes in the cluster change.
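The decision this change hinges on can be stated in isolation as the sketch below (a simplification, not the exact production signature; the real logic lives in PersistentTasksCustomMetaData#deassociateDeadNodes and is wired into the join and node-removal cluster state executors):

    import java.util.Set;

    class LostAssignmentCheck {
        // An assignment is "lost" when it names an executor node that is no longer
        // part of the cluster; such tasks are moved to LOST_NODE_ASSIGNMENT so a
        // later reassignment round can pick a live node.
        static boolean isAssignmentLost(String executorNode, Set<String> liveNodeIds) {
            return executorNode != null && liveNodeIds.contains(executorNode) == false;
        }
    }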
--- .../coordination/JoinTaskExecutor.java | 2 + .../NodeRemovalClusterStateTaskExecutor.java | 4 +- .../routing/allocation/AllocationService.java | 2 +- .../PersistentTasksCustomMetaData.java | 39 ++++++++ .../PersistentTasksCustomMetaDataTests.java | 93 +++++++++++++++++++ 5 files changed, 138 insertions(+), 2 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java index c4c76d8a8fe74..82eb26d98b97e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTaskExecutor.java @@ -30,6 +30,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import java.util.ArrayList; import java.util.Collection; @@ -187,6 +188,7 @@ protected ClusterState.Builder becomeMasterAndTrimConflictingNodes(ClusterState .blocks(currentState.blocks()) .removeGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID)).build(); logger.trace("becomeMasterAndTrimConflictingNodes: {}", tmpState.nodes()); + tmpState = PersistentTasksCustomMetaData.deassociateDeadNodes(tmpState); return ClusterState.builder(allocationService.deassociateDeadNodes(tmpState, false, "removed dead nodes on election")); } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java index 58d4b04444b2f..9d6051d0ccf03 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeRemovalClusterStateTaskExecutor.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import java.util.List; @@ -91,8 +92,9 @@ public ClusterTasksResult execute(final ClusterState currentState, final L protected ClusterTasksResult getTaskClusterTasksResult(ClusterState currentState, List tasks, ClusterState remainingNodesClusterState) { + ClusterState ptasksDeassociatedState = PersistentTasksCustomMetaData.deassociateDeadNodes(remainingNodesClusterState); final ClusterTasksResult.Builder resultBuilder = ClusterTasksResult.builder().successes(tasks); - return resultBuilder.build(allocationService.deassociateDeadNodes(remainingNodesClusterState, true, describeTasks(tasks))); + return resultBuilder.build(allocationService.deassociateDeadNodes(ptasksDeassociatedState, true, describeTasks(tasks))); } // visible for testing diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index 59f43a193ddc8..7acf20185eedd 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -425,7 +425,7 @@ private void deassociateDeadNodes(RoutingAllocation allocation) { for (ShardRouting shardRouting : 
node.copyShards()) { final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index()); boolean delayed = INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.get(indexMetaData.getSettings()).nanos() > 0; - UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, "node_left[" + node.nodeId() + "]", + UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NODE_LEFT, "node_left [" + node.nodeId() + "]", null, 0, allocation.getCurrentNanoTime(), System.currentTimeMillis(), delayed, AllocationStatus.NO_ATTEMPT); allocation.routingNodes().failShard(logger, shardRouting, unassignedInfo, indexMetaData, allocation.changes()); } diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java index b7a179e41e381..01e3d7450a60d 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetaData.java @@ -62,6 +62,7 @@ public final class PersistentTasksCustomMetaData extends AbstractNamedDiffable> tasks; @@ -119,6 +120,11 @@ public PersistentTasksCustomMetaData(long lastAllocationId, Map PersistentTask getTa return null; } + /** + * Unassign any persistent tasks executing on nodes that are no longer in + * the cluster. If the task's assignment has a non-null executor node and that + * node is no longer in the cluster then the assignment is set to + * {@link #LOST_NODE_ASSIGNMENT} + * + * @param clusterState The cluster state + * @return If no changes are made, the argument {@code clusterState} is returned, else + * a copy with the modified tasks + */ + public static ClusterState deassociateDeadNodes(ClusterState clusterState) { + PersistentTasksCustomMetaData tasks = getPersistentTasksCustomMetaData(clusterState); + if (tasks == null) { + return clusterState; + } + + PersistentTasksCustomMetaData.Builder taskBuilder = PersistentTasksCustomMetaData.builder(tasks); + for (PersistentTask task : tasks.tasks()) { + if (task.getAssignment().getExecutorNode() != null && + clusterState.nodes().nodeExists(task.getAssignment().getExecutorNode()) == false) { + taskBuilder.reassignTask(task.getId(), LOST_NODE_ASSIGNMENT); + } + } + + if (taskBuilder.isChanged() == false) { + return clusterState; + } + + MetaData.Builder metaDataBuilder = MetaData.builder(clusterState.metaData()); + metaDataBuilder.putCustom(TYPE, taskBuilder.build()); + return ClusterState.builder(clusterState).metaData(metaDataBuilder).build(); + } + public static class Assignment { @Nullable private final String executorNode; diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksCustomMetaDataTests.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksCustomMetaDataTests.java index 2a180cc12dd19..c25ca3cb7db63 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksCustomMetaDataTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksCustomMetaDataTests.java @@ -21,10 +21,14 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.Version; import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.metadata.MetaData; import
org.elasticsearch.cluster.metadata.MetaData.Custom; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; @@ -33,9 +37,11 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; @@ -65,6 +71,8 @@ import static org.elasticsearch.test.VersionUtils.getPreviousVersion; import static org.elasticsearch.test.VersionUtils.randomVersionBetween; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.sameInstance; public class PersistentTasksCustomMetaDataTests extends AbstractDiffableSerializationTestCase { @@ -307,6 +315,91 @@ public void testFeatureSerialization() throws IOException { assertThat(read.taskMap().keySet(), equalTo(Collections.singleton("test_compatible"))); } + public void testDisassociateDeadNodes_givenNoPersistentTasks() { + ClusterState originalState = ClusterState.builder(new ClusterName("persistent-tasks-tests")).build(); + ClusterState returnedState = PersistentTasksCustomMetaData.deassociateDeadNodes(originalState); + assertThat(originalState, sameInstance(returnedState)); + } + + public void testDisassociateDeadNodes_givenAssignedPersistentTask() { + DiscoveryNodes nodes = DiscoveryNodes.builder() + .add(new DiscoveryNode("node1", buildNewFakeTransportAddress(), Version.CURRENT)) + .localNodeId("node1") + .masterNodeId("node1") + .build(); + + String taskName = "test/task"; + PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder() + .addTask("task-id", taskName, emptyTaskParams(taskName), + new PersistentTasksCustomMetaData.Assignment("node1", "test assignment")); + + ClusterState originalState = ClusterState.builder(new ClusterName("persistent-tasks-tests")) + .nodes(nodes) + .metaData(MetaData.builder().putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())) + .build(); + ClusterState returnedState = PersistentTasksCustomMetaData.deassociateDeadNodes(originalState); + assertThat(originalState, sameInstance(returnedState)); + + PersistentTasksCustomMetaData originalTasks = PersistentTasksCustomMetaData.getPersistentTasksCustomMetaData(originalState); + PersistentTasksCustomMetaData returnedTasks = PersistentTasksCustomMetaData.getPersistentTasksCustomMetaData(returnedState); + assertEquals(originalTasks, returnedTasks); + } + + public void testDisassociateDeadNodes() { + DiscoveryNodes nodes = DiscoveryNodes.builder() + .add(new DiscoveryNode("node1", buildNewFakeTransportAddress(), Version.CURRENT)) + .localNodeId("node1") + .masterNodeId("node1") + .build(); + + String taskName = "test/task"; + PersistentTasksCustomMetaData.Builder tasksBuilder = PersistentTasksCustomMetaData.builder() + .addTask("assigned-task", taskName, emptyTaskParams(taskName), + new 
PersistentTasksCustomMetaData.Assignment("node1", "test assignment")) + .addTask("task-on-deceased-node", taskName, emptyTaskParams(taskName), + new PersistentTasksCustomMetaData.Assignment("left-the-cluster", "test assignment")); + + ClusterState originalState = ClusterState.builder(new ClusterName("persistent-tasks-tests")) + .nodes(nodes) + .metaData(MetaData.builder().putCustom(PersistentTasksCustomMetaData.TYPE, tasksBuilder.build())) + .build(); + ClusterState returnedState = PersistentTasksCustomMetaData.deassociateDeadNodes(originalState); + assertThat(originalState, not(sameInstance(returnedState))); + + PersistentTasksCustomMetaData originalTasks = PersistentTasksCustomMetaData.getPersistentTasksCustomMetaData(originalState); + PersistentTasksCustomMetaData returnedTasks = PersistentTasksCustomMetaData.getPersistentTasksCustomMetaData(returnedState); + assertNotEquals(originalTasks, returnedTasks); + + assertEquals(originalTasks.getTask("assigned-task"), returnedTasks.getTask("assigned-task")); + assertNotEquals(originalTasks.getTask("task-on-deceased-node"), returnedTasks.getTask("task-on-deceased-node")); + assertEquals(PersistentTasksCustomMetaData.LOST_NODE_ASSIGNMENT, returnedTasks.getTask("task-on-deceased-node").getAssignment()); + } + + private PersistentTaskParams emptyTaskParams(String taskName) { + return new PersistentTaskParams() { + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) { + return builder; + } + + @Override + public void writeTo(StreamOutput out) { + + } + + @Override + public String getWriteableName() { + return taskName; + } + + @Override + public Version getMinimalSupportedVersion() { + return Version.CURRENT; + } + }; + } + private Assignment randomAssignment() { if (randomBoolean()) { if (randomBoolean()) { From 23ba9008409444a0f1a48f6fd73b545307efe192 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Tue, 22 Jan 2019 13:53:10 +0100 Subject: [PATCH 09/39] Publish to masters first (#37673) Prefer publishing to master-eligible nodes first, so that cluster state updates are committed more quickly, and master-eligible nodes are also turned into followers more quickly after a leader election.
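The interesting part is just the iteration order; a self-contained sketch of the same idea follows (the Node type below is a stand-in for illustration, not the real DiscoveryNode):

    import java.util.List;
    import java.util.stream.Collectors;
    import java.util.stream.Stream;

    class MastersFirstSketch {
        static class Node {
            final String id;
            final boolean masterEligible;
            Node(String id, boolean masterEligible) { this.id = id; this.masterEligible = masterEligible; }
        }

        // Same shape as the new DiscoveryNodes#mastersFirstStream() in the diff:
        // emit the master-eligible nodes first, then everyone else.
        static List<Node> mastersFirst(List<Node> nodes) {
            return Stream.concat(
                    nodes.stream().filter(n -> n.masterEligible),
                    nodes.stream().filter(n -> n.masterEligible == false))
                .collect(Collectors.toList());
        }
    }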
--- .../coordination/FollowersChecker.java | 4 +- .../cluster/coordination/Publication.java | 2 +- .../cluster/node/DiscoveryNodes.java | 9 ++++ .../coordination/FollowersCheckerTests.java | 52 +++++++++++++++++++ .../coordination/PublicationTests.java | 43 ++++++++++++++- .../cluster/node/DiscoveryNodesTests.java | 13 +++++ 6 files changed, 118 insertions(+), 5 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java b/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java index b7f99df31c018..641f3941cb3bd 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java @@ -135,7 +135,7 @@ public void setCurrentNodes(DiscoveryNodes discoveryNodes) { followerCheckers.keySet().removeIf(isUnknownNode); faultyNodes.removeIf(isUnknownNode); - for (final DiscoveryNode discoveryNode : discoveryNodes) { + discoveryNodes.mastersFirstStream().forEach(discoveryNode -> { if (discoveryNode.equals(discoveryNodes.getLocalNode()) == false && followerCheckers.containsKey(discoveryNode) == false && faultyNodes.contains(discoveryNode) == false) { @@ -144,7 +144,7 @@ public void setCurrentNodes(DiscoveryNodes discoveryNodes) { followerCheckers.put(discoveryNode, followerChecker); followerChecker.start(); } - } + }); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java index 6838c2f996ffc..4aea820d6d9e0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Publication.java @@ -58,7 +58,7 @@ public Publication(PublishRequest publishRequest, AckListener ackListener, LongS startTime = currentTimeSupplier.getAsLong(); applyCommitRequest = Optional.empty(); publicationTargets = new ArrayList<>(publishRequest.getAcceptedState().getNodes().getNodes().size()); - publishRequest.getAcceptedState().getNodes().iterator().forEachRemaining(n -> publicationTargets.add(new PublicationTarget(n))); + publishRequest.getAcceptedState().getNodes().mastersFirstStream().forEach(n -> publicationTargets.add(new PublicationTarget(n))); } public void start(Set faultyNodes) { diff --git a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java index 2e0e63186917b..02f4d5d93bf4e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java +++ b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java @@ -40,6 +40,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.stream.Stream; import java.util.stream.StreamSupport; /** @@ -161,6 +162,14 @@ public ImmutableOpenMap getCoordinatingOnlyNodes() { return nodes.build(); } + /** + * Returns a stream of all nodes, with master nodes at the front + */ + public Stream mastersFirstStream() { + return Stream.concat(StreamSupport.stream(masterNodes.spliterator(), false).map(cur -> cur.value), + StreamSupport.stream(this.spliterator(), false).filter(n -> n.isMasterNode() == false)); + } + /** * Get a node by its id * diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/FollowersCheckerTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/FollowersCheckerTests.java index 
6d985d46ef6e4..4f1016847c887 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/FollowersCheckerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/FollowersCheckerTests.java @@ -31,7 +31,9 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.EqualsHashCodeTestUtils; import org.elasticsearch.test.EqualsHashCodeTestUtils.CopyFunction; +import org.elasticsearch.test.transport.CapturingTransport; import org.elasticsearch.test.transport.MockTransport; +import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool.Names; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.TransportException; @@ -41,12 +43,21 @@ import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; import java.util.HashSet; +import java.util.List; +import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.Stream; import static java.util.Collections.emptySet; import static org.elasticsearch.cluster.coordination.FollowersChecker.FOLLOWER_CHECK_ACTION_NAME; @@ -62,6 +73,7 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.core.IsInstanceOf.instanceOf; +import static org.mockito.Mockito.mock; public class FollowersCheckerTests extends ESTestCase { @@ -536,6 +548,46 @@ public String executor() { } } + public void testPreferMasterNodes() { + List nodes = randomNodes(10); + DiscoveryNodes.Builder discoNodesBuilder = DiscoveryNodes.builder(); + nodes.forEach(dn -> discoNodesBuilder.add(dn)); + DiscoveryNodes discoveryNodes = discoNodesBuilder.localNodeId(nodes.get(0).getId()).build(); + CapturingTransport capturingTransport = new CapturingTransport(); + TransportService transportService = capturingTransport.createTransportService(Settings.EMPTY, mock(ThreadPool.class), + TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> nodes.get(0), null, emptySet()); + final FollowersChecker followersChecker = new FollowersChecker(Settings.EMPTY, transportService, fcr -> { + assert false : fcr; + }, (node, reason) -> { + assert false : node; + }); + followersChecker.setCurrentNodes(discoveryNodes); + List followerTargets = Stream.of(capturingTransport.getCapturedRequestsAndClear()) + .map(cr -> cr.node).collect(Collectors.toList()); + List sortedFollowerTargets = new ArrayList<>(followerTargets); + Collections.sort(sortedFollowerTargets, Comparator.comparing(n -> n.isMasterNode() == false)); + assertEquals(sortedFollowerTargets, followerTargets); + } + + private static List randomNodes(final int numNodes) { + List nodesList = new ArrayList<>(); + for (int i = 0; i < numNodes; i++) { + Map attributes = new HashMap<>(); + if (frequently()) { + attributes.put("custom", randomBoolean() ?
"match" : randomAlphaOfLengthBetween(3, 5)); + } + final DiscoveryNode node = newNode(i, attributes, + new HashSet<>(randomSubsetOf(Arrays.asList(DiscoveryNode.Role.values())))); + nodesList.add(node); + } + return nodesList; + } + + private static DiscoveryNode newNode(int nodeId, Map attributes, Set roles) { + return new DiscoveryNode("name_" + nodeId, "node_" + nodeId, buildNewFakeTransportAddress(), attributes, roles, + Version.CURRENT); + } + private static class ExpectsSuccess implements TransportResponseHandler { private final AtomicBoolean responseReceived = new AtomicBoolean(); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTests.java index 6f506bbfc913e..658250bc7a4da 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.cluster.coordination; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.coordination.CoordinationMetaData.VotingConfiguration; @@ -33,10 +34,13 @@ import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportResponse; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Optional; @@ -97,8 +101,8 @@ abstract class MockPublication extends Publication { boolean committed; - Map> pendingPublications = new HashMap<>(); - Map> pendingCommits = new HashMap<>(); + Map> pendingPublications = new LinkedHashMap<>(); + Map> pendingCommits = new LinkedHashMap<>(); Map joins = new HashMap<>(); Set missingJoins = new HashSet<>(); @@ -372,6 +376,22 @@ public void testClusterStatePublishingFailsOrTimesOutBeforeCommit() throws Inter tuple.v1().equals(n2) ? "dummy failure" : "non-failed nodes do not form a quorum"))); } + public void testPublishingToMastersFirst() { + VotingConfiguration singleNodeConfig = new VotingConfiguration(Sets.newHashSet(n1.getId())); + initializeCluster(singleNodeConfig); + + DiscoveryNodes.Builder discoNodesBuilder = DiscoveryNodes.builder(); + randomNodes(10).forEach(dn -> discoNodesBuilder.add(dn)); + DiscoveryNodes discoveryNodes = discoNodesBuilder.add(n1).localNodeId(n1.getId()).build(); + MockPublication publication = node1.publish(CoordinationStateTests.clusterState(1L, 2L, + discoveryNodes, singleNodeConfig, singleNodeConfig, 42L), null, Collections.emptySet()); + + List publicationTargets = new ArrayList<>(publication.pendingPublications.keySet()); + List sortedPublicationTargets = new ArrayList<>(publicationTargets); + Collections.sort(sortedPublicationTargets, Comparator.comparing(n -> n.isMasterNode() == false)); + assertEquals(sortedPublicationTargets, publicationTargets); + } + public void testClusterStatePublishingTimesOutAfterCommit() throws InterruptedException { VotingConfiguration config = new VotingConfiguration(randomBoolean() ? 
Sets.newHashSet(n1.getId(), n2.getId()) : Sets.newHashSet(n1.getId(), n2.getId(), n3.getId())); @@ -428,6 +448,25 @@ public void testClusterStatePublishingTimesOutAfterCommit() throws InterruptedEx assertEquals(discoNodes, ackListener.await(0L, TimeUnit.SECONDS)); } + private static List randomNodes(final int numNodes) { + List nodesList = new ArrayList<>(); + for (int i = 0; i < numNodes; i++) { + Map attributes = new HashMap<>(); + if (frequently()) { + attributes.put("custom", randomBoolean() ? "match" : randomAlphaOfLengthBetween(3, 5)); + } + final DiscoveryNode node = newNode(i, attributes, + new HashSet<>(randomSubsetOf(Arrays.asList(DiscoveryNode.Role.values())))); + nodesList.add(node); + } + return nodesList; + } + + private static DiscoveryNode newNode(int nodeId, Map attributes, Set roles) { + return new DiscoveryNode("name_" + nodeId, "node_" + nodeId, buildNewFakeTransportAddress(), attributes, roles, + Version.CURRENT); + } + public static Collector> shuffle() { return Collectors.collectingAndThen(Collectors.toList(), ts -> { diff --git a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java index cf0b1eebe983a..5c92029520e8d 100644 --- a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodesTests.java @@ -27,6 +27,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -149,6 +150,18 @@ public void testResolveNodesIds() { assertThat(resolvedNodesIds, equalTo(expectedNodesIds)); } + public void testMastersFirst() { + final List inputNodes = randomNodes(10); + final DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); + inputNodes.forEach(discoBuilder::add); + final List returnedNodes = discoBuilder.build().mastersFirstStream().collect(Collectors.toList()); + assertEquals(returnedNodes.size(), inputNodes.size()); + assertEquals(new HashSet<>(returnedNodes), new HashSet<>(inputNodes)); + final List sortedNodes = new ArrayList<>(returnedNodes); + Collections.sort(sortedNodes, Comparator.comparing(n -> n.isMasterNode() == false)); + assertEquals(sortedNodes, returnedNodes); + } + public void testDeltas() { Set nodesA = new HashSet<>(); nodesA.addAll(randomNodes(1 + randomInt(10))); From 7507af29fa0ae8569e3551d7815fc55afe5af014 Mon Sep 17 00:00:00 2001 From: Andrei Stefan Date: Tue, 22 Jan 2019 14:55:28 +0200 Subject: [PATCH 10/39] SQL: Return Intervals in SQL format for CLI (#37602) * Add separate CLI Mode * Use the correct Mode for cursor close requests * Renamed CliFormatter and have different formatting behavior for CLI and "text" format. 
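The effect is easiest to see as a mode switch at serialization time; here is a standalone sketch (the enum mirrors the Mode values in the diff; the helper and its isInterval flag are illustrative, and the example values come from the test expectations below):

    class IntervalRenderingSketch {
        enum Mode { CLI, PLAIN, JDBC, ODBC }

        // The CLI gets the SQL-style rendering via the interval's own toString();
        // all other clients keep the ISO 8601 string. For INTERVAL '163' HOUR that
        // is "+6 19:00:00.0" on the CLI and "PT163H" everywhere else.
        static String render(Object value, Mode mode, boolean isInterval) {
            if (mode == Mode.CLI && isInterval) {
                return value.toString();
            }
            return String.valueOf(value); // stands in for StringUtils.toString(value)
        }
    }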
--- .../xpack/sql/jdbc/JdbcHttpClient.java | 2 +- .../xpack/sql/qa/SqlProtocolTestCase.java | 95 +++++++++++-------- .../xpack/sql/qa/jdbc/JdbcTestUtils.java | 6 +- .../sql/qa/rest/RestSqlUsageTestCase.java | 5 +- ...{CliFormatter.java => BasicFormatter.java} | 52 ++++++---- .../xpack/sql/action/SqlQueryResponse.java | 9 +- .../sql/action/SqlRequestParsersTests.java | 2 +- .../cli/command/ServerQueryCliCommand.java | 15 +-- .../command/ServerQueryCliCommandTests.java | 5 +- .../xpack/sql/client/HttpClient.java | 14 ++- .../elasticsearch/xpack/sql/proto/Mode.java | 1 + .../xpack/sql/proto/RequestInfo.java | 2 - .../xpack/sql/plugin/SqlPlugin.java | 1 + .../xpack/sql/plugin/TextFormat.java | 18 ++-- ...erCursor.java => TextFormatterCursor.java} | 22 ++--- .../xpack/sql/session/Cursors.java | 4 +- ...terTests.java => BasicFormatterTests.java} | 42 ++++---- .../sql/execution/search/CursorTests.java | 20 +++- 18 files changed, 191 insertions(+), 124 deletions(-) rename x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/{CliFormatter.java => BasicFormatter.java} (77%) rename x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/{CliFormatterCursor.java => TextFormatterCursor.java} (78%) rename x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/{CliFormatterTests.java => BasicFormatterTests.java} (63%) diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcHttpClient.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcHttpClient.java index f2a9f9d15343a..73713f91231d6 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcHttpClient.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcHttpClient.java @@ -68,7 +68,7 @@ Tuple>> nextPage(String cursor, RequestMeta meta) thro } boolean queryClose(String cursor) throws SQLException { - return httpClient.queryClose(cursor); + return httpClient.queryClose(cursor, Mode.JDBC); } InfoResponse serverInfo() throws SQLException { diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/SqlProtocolTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/SqlProtocolTestCase.java index 94d3de6d30787..e346bfc649b23 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/SqlProtocolTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/SqlProtocolTestCase.java @@ -31,6 +31,7 @@ import static org.elasticsearch.xpack.sql.proto.Protocol.SQL_QUERY_REST_ENDPOINT; import static org.elasticsearch.xpack.sql.proto.RequestInfo.CLIENT_IDS; import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.mode; +import static org.elasticsearch.xpack.sql.proto.Mode.CLI; public abstract class SqlProtocolTestCase extends ESRestTestCase { @@ -80,57 +81,71 @@ public void testIPs() throws IOException { } public void testDateTimeIntervals() throws IOException { - assertQuery("SELECT INTERVAL '326' YEAR", "INTERVAL '326' YEAR", "interval_year", "P326Y", 7); - assertQuery("SELECT INTERVAL '50' MONTH", "INTERVAL '50' MONTH", "interval_month", "P50M", 7); - assertQuery("SELECT INTERVAL '520' DAY", "INTERVAL '520' DAY", "interval_day", "PT12480H", 23); - assertQuery("SELECT INTERVAL '163' HOUR", "INTERVAL '163' HOUR", "interval_hour", "PT163H", 23); - assertQuery("SELECT INTERVAL '163' MINUTE", "INTERVAL '163' MINUTE", "interval_minute", "PT2H43M", 23); - assertQuery("SELECT INTERVAL '223.16' SECOND", "INTERVAL 
'223.16' SECOND", "interval_second", "PT3M43.016S", 23); - assertQuery("SELECT INTERVAL '163-11' YEAR TO MONTH", "INTERVAL '163-11' YEAR TO MONTH", "interval_year_to_month", "P163Y11M", 7); - assertQuery("SELECT INTERVAL '163 12' DAY TO HOUR", "INTERVAL '163 12' DAY TO HOUR", "interval_day_to_hour", "PT3924H", 23); + assertQuery("SELECT INTERVAL '326' YEAR", "INTERVAL '326' YEAR", "interval_year", "P326Y", "+326-0", 7); + assertQuery("SELECT INTERVAL '50' MONTH", "INTERVAL '50' MONTH", "interval_month", "P50M", "+0-50", 7); + assertQuery("SELECT INTERVAL '520' DAY", "INTERVAL '520' DAY", "interval_day", "PT12480H", "+520 00:00:00.0", 23); + assertQuery("SELECT INTERVAL '163' HOUR", "INTERVAL '163' HOUR", "interval_hour", "PT163H", "+6 19:00:00.0", 23); + assertQuery("SELECT INTERVAL '163' MINUTE", "INTERVAL '163' MINUTE", "interval_minute", "PT2H43M", "+0 02:43:00.0", 23); + assertQuery("SELECT INTERVAL '223.16' SECOND", "INTERVAL '223.16' SECOND", "interval_second", "PT3M43.016S", "+0 00:03:43.16", 23); + assertQuery("SELECT INTERVAL '163-11' YEAR TO MONTH", "INTERVAL '163-11' YEAR TO MONTH", "interval_year_to_month", "P163Y11M", + "+163-11", 7); + assertQuery("SELECT INTERVAL '163 12' DAY TO HOUR", "INTERVAL '163 12' DAY TO HOUR", "interval_day_to_hour", "PT3924H", + "+163 12:00:00.0", 23); assertQuery("SELECT INTERVAL '163 12:39' DAY TO MINUTE", "INTERVAL '163 12:39' DAY TO MINUTE", "interval_day_to_minute", - "PT3924H39M", 23); + "PT3924H39M", "+163 12:39:00.0", 23); assertQuery("SELECT INTERVAL '163 12:39:59.163' DAY TO SECOND", "INTERVAL '163 12:39:59.163' DAY TO SECOND", - "interval_day_to_second", "PT3924H39M59.163S", 23); + "interval_day_to_second", "PT3924H39M59.163S", "+163 12:39:59.163", 23); assertQuery("SELECT INTERVAL -'163 23:39:56.23' DAY TO SECOND", "INTERVAL -'163 23:39:56.23' DAY TO SECOND", - "interval_day_to_second", "PT-3935H-39M-56.023S", 23); + "interval_day_to_second", "PT-3935H-39M-56.023S", "-163 23:39:56.23", 23); assertQuery("SELECT INTERVAL '163:39' HOUR TO MINUTE", "INTERVAL '163:39' HOUR TO MINUTE", "interval_hour_to_minute", - "PT163H39M", 23); + "PT163H39M", "+6 19:39:00.0", 23); assertQuery("SELECT INTERVAL '163:39:59.163' HOUR TO SECOND", "INTERVAL '163:39:59.163' HOUR TO SECOND", "interval_hour_to_second", - "PT163H39M59.163S", 23); + "PT163H39M59.163S", "+6 19:39:59.163", 23); assertQuery("SELECT INTERVAL '163:59.163' MINUTE TO SECOND", "INTERVAL '163:59.163' MINUTE TO SECOND", "interval_minute_to_second", - "PT2H43M59.163S", 23); + "PT2H43M59.163S", "+0 02:43:59.163", 23); } - @SuppressWarnings({ "unchecked" }) - private void assertQuery(String sql, String columnName, String columnType, Object columnValue, int displaySize) throws IOException { + private void assertQuery(String sql, String columnName, String columnType, Object columnValue, int displaySize) + throws IOException { + assertQuery(sql, columnName, columnType, columnValue, null, displaySize); + } + + private void assertQuery(String sql, String columnName, String columnType, Object columnValue, Object cliColumnValue, int displaySize) + throws IOException { for (Mode mode : Mode.values()) { - Map response = runSql(mode.toString(), sql); - List columns = (ArrayList) response.get("columns"); - assertEquals(1, columns.size()); + boolean isCliCheck = mode == CLI && cliColumnValue != null; + assertQuery(sql, columnName, columnType, isCliCheck ? 
cliColumnValue : columnValue, displaySize, mode); + } + } + + @SuppressWarnings({ "unchecked" }) + private void assertQuery(String sql, String columnName, String columnType, Object columnValue, int displaySize, Mode mode) + throws IOException { + Map response = runSql(mode.toString(), sql); + List columns = (ArrayList) response.get("columns"); + assertEquals(1, columns.size()); - Map column = (HashMap) columns.get(0); - assertEquals(columnName, column.get("name")); - assertEquals(columnType, column.get("type")); - if (mode != Mode.PLAIN) { - assertEquals(3, column.size()); - assertEquals(displaySize, column.get("display_size")); - } else { - assertEquals(2, column.size()); - } - - List rows = (ArrayList) response.get("rows"); - assertEquals(1, rows.size()); - List row = (ArrayList) rows.get(0); - assertEquals(1, row.size()); + Map column = (HashMap) columns.get(0); + assertEquals(columnName, column.get("name")); + assertEquals(columnType, column.get("type")); + if (Mode.isDriver(mode)) { + assertEquals(3, column.size()); + assertEquals(displaySize, column.get("display_size")); + } else { + assertEquals(2, column.size()); + } + + List rows = (ArrayList) response.get("rows"); + assertEquals(1, rows.size()); + List row = (ArrayList) rows.get(0); + assertEquals(1, row.size()); - // from xcontent we can get float or double, depending on the conversion - // method of the specific xcontent format implementation - if (columnValue instanceof Float && row.get(0) instanceof Double) { - assertEquals(columnValue, (float)((Number) row.get(0)).doubleValue()); - } else { - assertEquals(columnValue, row.get(0)); - } + // from xcontent we can get float or double, depending on the conversion + // method of the specific xcontent format implementation + if (columnValue instanceof Float && row.get(0) instanceof Double) { + assertEquals(columnValue, (float)((Number) row.get(0)).doubleValue()); + } else { + assertEquals(columnValue, row.get(0)); } } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java index e91f9fc727238..a5b8f5cb4766a 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java @@ -6,7 +6,7 @@ package org.elasticsearch.xpack.sql.qa.jdbc; import org.apache.logging.log4j.Logger; -import org.elasticsearch.xpack.sql.action.CliFormatter; +import org.elasticsearch.xpack.sql.action.BasicFormatter; import org.elasticsearch.xpack.sql.proto.ColumnInfo; import org.elasticsearch.xpack.sql.proto.StringUtils; @@ -19,6 +19,8 @@ import java.util.ArrayList; import java.util.List; +import static org.elasticsearch.xpack.sql.action.BasicFormatter.FormatOption.CLI; + public abstract class JdbcTestUtils { public static final String SQL_TRACE = "org.elasticsearch.xpack.sql:TRACE"; @@ -131,7 +133,7 @@ public static void logLikeCLI(ResultSet rs, Logger logger) throws SQLException { data.add(entry); } - CliFormatter formatter = new CliFormatter(cols, data); + BasicFormatter formatter = new BasicFormatter(cols, data, CLI); logger.info("\n" + formatter.formatWithHeader(cols, data)); } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlUsageTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlUsageTestCase.java index 5bf400bba0572..4d864961e8ecd 100644 --- 
a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlUsageTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlUsageTestCase.java @@ -258,9 +258,10 @@ private void runSql(String sql) throws IOException { String mode = Mode.PLAIN.toString(); if (clientType.equals(ClientType.JDBC.toString())) { mode = Mode.JDBC.toString(); - } - if (clientType.startsWith(ClientType.ODBC.toString())) { + } else if (clientType.startsWith(ClientType.ODBC.toString())) { mode = Mode.ODBC.toString(); + } else if (clientType.equals(ClientType.CLI.toString())) { + mode = Mode.CLI.toString(); } runSql(mode, clientType, sql); diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/CliFormatter.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/BasicFormatter.java similarity index 77% rename from x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/CliFormatter.java rename to x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/BasicFormatter.java index f3f63fd18a275..fec2a3ee621e1 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/CliFormatter.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/BasicFormatter.java @@ -14,26 +14,46 @@ import java.io.IOException; import java.util.Arrays; import java.util.List; +import java.util.Objects; +import java.util.function.Function; /** - * Formats {@link SqlQueryResponse} for the CLI. {@linkplain Writeable} so + * Formats {@link SqlQueryResponse} for the CLI and for the TEXT format. {@linkplain Writeable} so * that its state can be saved between pages of results. */ -public class CliFormatter implements Writeable { +public class BasicFormatter implements Writeable { /** * The minimum width for any column in the formatted results. */ private static final int MIN_COLUMN_WIDTH = 15; private int[] width; + + public enum FormatOption { + CLI(Objects::toString), + TEXT(StringUtils::toString); + + private final Function apply; + + FormatOption(Function apply) { + this.apply = l -> l == null ? null : apply.apply(l); + } + + public final String apply(Object l) { + return apply.apply(l); + } + } + + private final FormatOption formatOption; /** - * Create a new {@linkplain CliFormatter} for formatting responses similar + * Create a new {@linkplain BasicFormatter} for formatting responses similar * to the provided columns and rows. */ - public CliFormatter(List columns, List> rows) { + public BasicFormatter(List columns, List> rows, FormatOption formatOption) { // Figure out the column widths: // 1. Start with the widths of the column names + this.formatOption = formatOption; width = new int[columns.size()]; for (int i = 0; i < width.length; i++) { // TODO read the width from the data type? @@ -43,24 +63,24 @@ public CliFormatter(List columns, List> rows) { // 2. Expand columns to fit the largest value for (List row : rows) { for (int i = 0; i < width.length; i++) { - // TODO are we sure toString is correct here? What about dates that come back as longs. 
- // Tracked by https://github.com/elastic/x-pack-elasticsearch/issues/3081 - width[i] = Math.max(width[i], StringUtils.toString(row.get(i)).length()); + width[i] = Math.max(width[i], formatOption.apply(row.get(i)).length()); } } } - public CliFormatter(StreamInput in) throws IOException { + public BasicFormatter(StreamInput in) throws IOException { width = in.readIntArray(); + formatOption = in.readEnum(FormatOption.class); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeIntArray(width); + out.writeEnum(formatOption); } - + /** - * Format the provided {@linkplain SqlQueryResponse} for the CLI + * Format the provided {@linkplain SqlQueryResponse} for the set format * including the header lines. */ public String formatWithHeader(List columns, List> rows) { @@ -103,7 +123,7 @@ public String formatWithHeader(List columns, List> rows } /** - * Format the provided {@linkplain SqlQueryResponse} for the CLI + * Format the provided {@linkplain SqlQueryResponse} for the set format * without the header lines. */ public String formatWithoutHeader(List> rows) { @@ -116,9 +136,7 @@ private String formatWithoutHeader(StringBuilder sb, List> rows) { if (i > 0) { sb.append('|'); } - // TODO are we sure toString is correct here? What about dates that come back as longs. - // Tracked by https://github.com/elastic/x-pack-elasticsearch/issues/3081 - String string = StringUtils.toString(row.get(i)); + String string = formatOption.apply(row.get(i)); if (string.length() <= width[i]) { // Pad sb.append(string); @@ -159,12 +177,12 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) { return false; } - CliFormatter that = (CliFormatter) o; - return Arrays.equals(width, that.width); + BasicFormatter that = (BasicFormatter) o; + return Arrays.equals(width, that.width) && formatOption == that.formatOption; } @Override public int hashCode() { - return Arrays.hashCode(width); + return Objects.hash(width, formatOption); } } diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java index 7301e86befa02..465b405d01ae8 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java @@ -24,6 +24,7 @@ import static java.util.Collections.unmodifiableList; import static org.elasticsearch.xpack.sql.action.AbstractSqlQueryRequest.CURSOR; +import static org.elasticsearch.xpack.sql.proto.Mode.CLI; /** * Response to perform an sql query @@ -36,6 +37,7 @@ public class SqlQueryResponse extends ActionResponse implements ToXContentObject private List columns; // TODO investigate reusing Page here - it probably is much more efficient private List> rows; + private static final String INTERVAL_CLASS_NAME = "Interval"; public SqlQueryResponse() { } @@ -173,7 +175,12 @@ public static XContentBuilder value(XContentBuilder builder, Mode mode, Object v ZonedDateTime zdt = (ZonedDateTime) value; // use the ISO format builder.value(StringUtils.toString(zdt)); - } else { + } else if (mode == CLI && value != null && value.getClass().getSuperclass().getSimpleName().equals(INTERVAL_CLASS_NAME)) { + // use the SQL format for intervals when sending back the response for CLI + // all other clients will receive ISO 8601 formatted intervals + builder.value(value.toString()); + } + else { 
builder.value(value); } return builder; diff --git a/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlRequestParsersTests.java b/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlRequestParsersTests.java index 4e41dddb46c3c..c9bdc8d670c6d 100644 --- a/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlRequestParsersTests.java +++ b/x-pack/plugin/sql/sql-action/src/test/java/org/elasticsearch/xpack/sql/action/SqlRequestParsersTests.java @@ -68,7 +68,7 @@ public void testClearCursorRequestParser() throws IOException { request = generateRequest("{\"cursor\" : \"whatever\", \"client_id\" : \"CLI\"}", SqlClearCursorRequest::fromXContent); - assertEquals("cli", request.clientId()); + assertNull(request.clientId()); assertEquals(Mode.PLAIN, request.mode()); assertEquals("whatever", request.getCursor()); diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommand.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommand.java index 5cb8bc0a4cd36..86b5cf6c36ef2 100644 --- a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommand.java +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommand.java @@ -5,26 +5,29 @@ */ package org.elasticsearch.xpack.sql.cli.command; -import org.elasticsearch.xpack.sql.action.CliFormatter; +import org.elasticsearch.xpack.sql.action.BasicFormatter; import org.elasticsearch.xpack.sql.cli.CliTerminal; import org.elasticsearch.xpack.sql.client.HttpClient; import org.elasticsearch.xpack.sql.client.JreHttpUrlConnection; +import org.elasticsearch.xpack.sql.proto.Mode; import org.elasticsearch.xpack.sql.proto.SqlQueryResponse; import java.sql.SQLException; +import static org.elasticsearch.xpack.sql.action.BasicFormatter.FormatOption.CLI; + public class ServerQueryCliCommand extends AbstractServerCliCommand { @Override protected boolean doHandle(CliTerminal terminal, CliSession cliSession, String line) { SqlQueryResponse response = null; HttpClient cliClient = cliSession.getClient(); - CliFormatter cliFormatter; + BasicFormatter formatter; String data; try { response = cliClient.queryInit(line, cliSession.getFetchSize()); - cliFormatter = new CliFormatter(response.columns(), response.rows()); - data = cliFormatter.formatWithHeader(response.columns(), response.rows()); + formatter = new BasicFormatter(response.columns(), response.rows(), CLI); + data = formatter.formatWithHeader(response.columns(), response.rows()); while (true) { handleText(terminal, data); if (response.cursor().isEmpty()) { @@ -36,7 +39,7 @@ protected boolean doHandle(CliTerminal terminal, CliSession cliSession, String l terminal.println(cliSession.getFetchSeparator()); } response = cliSession.getClient().nextPage(response.cursor()); - data = cliFormatter.formatWithoutHeader(response.rows()); + data = formatter.formatWithoutHeader(response.rows()); } } catch (SQLException e) { if (JreHttpUrlConnection.SQL_STATE_BAD_SERVER.equals(e.getSQLState())) { @@ -46,7 +49,7 @@ protected boolean doHandle(CliTerminal terminal, CliSession cliSession, String l } if (response != null) { try { - cliClient.queryClose(response.cursor()); + cliClient.queryClose(response.cursor(), Mode.CLI); } catch (SQLException ex) { terminal.error("Could not close cursor", ex.getMessage()); } diff --git 
a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommandTests.java b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommandTests.java index 498bbf7754c8d..9d4ded4a39c14 100644 --- a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommandTests.java +++ b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerQueryCliCommandTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.xpack.sql.cli.TestTerminal; import org.elasticsearch.xpack.sql.client.HttpClient; import org.elasticsearch.xpack.sql.proto.ColumnInfo; +import org.elasticsearch.xpack.sql.proto.Mode; import org.elasticsearch.xpack.sql.proto.SqlQueryResponse; import java.sql.SQLException; @@ -93,14 +94,14 @@ public void testCursorCleanupOnError() throws Exception { cliSession.setFetchSize(15); when(client.queryInit("test query", 15)).thenReturn(fakeResponse("my_cursor1", true, "first")); when(client.nextPage("my_cursor1")).thenThrow(new SQLException("test exception")); - when(client.queryClose("my_cursor1")).thenReturn(true); + when(client.queryClose("my_cursor1", Mode.CLI)).thenReturn(true); ServerQueryCliCommand cliCommand = new ServerQueryCliCommand(); assertTrue(cliCommand.handle(testTerminal, cliSession, "test query")); assertEquals(" field \n---------------\nfirst \n" + "Bad request [test exception]\n", testTerminal.toString()); verify(client, times(1)).queryInit(eq("test query"), eq(15)); verify(client, times(1)).nextPage(any()); - verify(client, times(1)).queryClose(eq("my_cursor1")); + verify(client, times(1)).queryClose(eq("my_cursor1"), eq(Mode.CLI)); verifyNoMoreInteractions(client); } diff --git a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java index 4fe6a39820bc7..c3f35aefd65f4 100644 --- a/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java +++ b/x-pack/plugin/sql/sql-client/src/main/java/org/elasticsearch/xpack/sql/client/HttpClient.java @@ -36,8 +36,6 @@ import java.util.Collections; import java.util.function.Function; -import static org.elasticsearch.xpack.sql.proto.RequestInfo.CLI; - /** * A specialized high-level REST client with support for SQL-related functions. * Similar to JDBC and the underlying HTTP connection, this class is not thread-safe @@ -65,10 +63,10 @@ public MainResponse serverInfo() throws SQLException { public SqlQueryResponse queryInit(String query, int fetchSize) throws SQLException { // TODO allow customizing the time zone - this is what session set/reset/get should be about - // method called only from CLI. "client_id" is set to "cli" + // method called only from CLI SqlQueryRequest sqlRequest = new SqlQueryRequest(query, Collections.emptyList(), null, ZoneId.of("Z"), fetchSize, TimeValue.timeValueMillis(cfg.queryTimeout()), TimeValue.timeValueMillis(cfg.pageTimeout()), - new RequestInfo(Mode.PLAIN, CLI)); + new RequestInfo(Mode.CLI)); return query(sqlRequest); } @@ -77,15 +75,15 @@ public SqlQueryResponse query(SqlQueryRequest sqlRequest) throws SQLException { } public SqlQueryResponse nextPage(String cursor) throws SQLException { - // method called only from CLI. 
"client_id" is set to "cli" + // method called only from CLI SqlQueryRequest sqlRequest = new SqlQueryRequest(cursor, TimeValue.timeValueMillis(cfg.queryTimeout()), - TimeValue.timeValueMillis(cfg.pageTimeout()), new RequestInfo(Mode.PLAIN, CLI)); + TimeValue.timeValueMillis(cfg.pageTimeout()), new RequestInfo(Mode.CLI)); return post(Protocol.SQL_QUERY_REST_ENDPOINT, sqlRequest, SqlQueryResponse::fromXContent); } - public boolean queryClose(String cursor) throws SQLException { + public boolean queryClose(String cursor, Mode mode) throws SQLException { SqlClearCursorResponse response = post(Protocol.CLEAR_CURSOR_REST_ENDPOINT, - new SqlClearCursorRequest(cursor, new RequestInfo(Mode.PLAIN)), + new SqlClearCursorRequest(cursor, new RequestInfo(mode)), SqlClearCursorResponse::fromXContent); return response.isSucceeded(); } diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Mode.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Mode.java index a301b41d3887a..26eb4867b3509 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Mode.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/Mode.java @@ -12,6 +12,7 @@ * SQL protocol mode */ public enum Mode { + CLI, PLAIN, JDBC, ODBC; diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/RequestInfo.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/RequestInfo.java index b860832856a82..97a3622a64f1f 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/RequestInfo.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/RequestInfo.java @@ -13,7 +13,6 @@ import java.util.Set; public class RequestInfo { - public static final String CLI = "cli"; private static final String CANVAS = "canvas"; public static final String ODBC_32 = "odbc32"; private static final String ODBC_64 = "odbc64"; @@ -22,7 +21,6 @@ public class RequestInfo { static { Set clientIds = new HashSet<>(4); - clientIds.add(CLI); clientIds.add(CANVAS); clientIds.add(ODBC_32); clientIds.add(ODBC_64); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlPlugin.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlPlugin.java index 9f569206438d2..c80b399d447e6 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlPlugin.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlPlugin.java @@ -65,6 +65,7 @@ public class SqlPlugin extends Plugin implements ActionPlugin { } break; case PLAIN: + case CLI: if (licenseState.isSqlAllowed() == false) { throw LicenseUtils.newComplianceException(XPackField.SQL); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java index 34c0f1c6d74f7..62963a99b2a98 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormat.java @@ -7,7 +7,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.rest.RestRequest; -import org.elasticsearch.xpack.sql.action.CliFormatter; +import org.elasticsearch.xpack.sql.action.BasicFormatter; import org.elasticsearch.xpack.sql.action.SqlQueryResponse; import org.elasticsearch.xpack.sql.proto.ColumnInfo; 
import org.elasticsearch.xpack.sql.session.Cursor; @@ -21,6 +21,8 @@ import java.util.Objects; import java.util.function.Function; +import static org.elasticsearch.xpack.sql.action.BasicFormatter.FormatOption.TEXT; + /** * Templating class for displaying SQL responses in text formats. */ @@ -40,21 +42,21 @@ enum TextFormat { PLAIN_TEXT() { @Override String format(Cursor cursor, RestRequest request, SqlQueryResponse response) { - final CliFormatter formatter; - if (cursor instanceof CliFormatterCursor) { - formatter = ((CliFormatterCursor) cursor).getCliFormatter(); + final BasicFormatter formatter; + if (cursor instanceof TextFormatterCursor) { + formatter = ((TextFormatterCursor) cursor).getFormatter(); return formatter.formatWithoutHeader(response.rows()); } else { - formatter = new CliFormatter(response.columns(), response.rows()); + formatter = new BasicFormatter(response.columns(), response.rows(), TEXT); return formatter.formatWithHeader(response.columns(), response.rows()); } } @Override Cursor wrapCursor(Cursor oldCursor, SqlQueryResponse response) { - CliFormatter formatter = (oldCursor instanceof CliFormatterCursor) ? - ((CliFormatterCursor) oldCursor).getCliFormatter() : new CliFormatter(response.columns(), response.rows()); - return CliFormatterCursor.wrap(super.wrapCursor(oldCursor, response), formatter); + BasicFormatter formatter = (oldCursor instanceof TextFormatterCursor) ? + ((TextFormatterCursor) oldCursor).getFormatter() : new BasicFormatter(response.columns(), response.rows(), TEXT); + return TextFormatterCursor.wrap(super.wrapCursor(oldCursor, response), formatter); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/CliFormatterCursor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormatterCursor.java similarity index 78% rename from x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/CliFormatterCursor.java rename to x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormatterCursor.java index b226e899e4d09..4ab1d77fe21ff 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/CliFormatterCursor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/TextFormatterCursor.java @@ -10,7 +10,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.xpack.sql.action.CliFormatter; +import org.elasticsearch.xpack.sql.action.BasicFormatter; import org.elasticsearch.xpack.sql.session.Configuration; import org.elasticsearch.xpack.sql.session.Cursor; import org.elasticsearch.xpack.sql.session.RowSet; @@ -21,31 +21,31 @@ /** * The cursor that wraps all necessary information for textual representation of the result table */ -public class CliFormatterCursor implements Cursor { +public class TextFormatterCursor implements Cursor { public static final String NAME = "f"; private final Cursor delegate; - private final CliFormatter formatter; + private final BasicFormatter formatter; /** * If the newCursor is empty, returns an empty cursor. Otherwise, creates a new - * CliFormatterCursor that wraps the newCursor. + * TextFormatterCursor that wraps the newCursor. 
*/ - public static Cursor wrap(Cursor newCursor, CliFormatter formatter) { + public static Cursor wrap(Cursor newCursor, BasicFormatter formatter) { if (newCursor == EMPTY) { return EMPTY; } - return new CliFormatterCursor(newCursor, formatter); + return new TextFormatterCursor(newCursor, formatter); } - private CliFormatterCursor(Cursor delegate, CliFormatter formatter) { + private TextFormatterCursor(Cursor delegate, BasicFormatter formatter) { this.delegate = delegate; this.formatter = formatter; } - public CliFormatterCursor(StreamInput in) throws IOException { + public TextFormatterCursor(StreamInput in) throws IOException { delegate = in.readNamedWriteable(Cursor.class); - formatter = new CliFormatter(in); + formatter = new BasicFormatter(in); } @Override @@ -54,7 +54,7 @@ public void writeTo(StreamOutput out) throws IOException { formatter.writeTo(out); } - public CliFormatter getCliFormatter() { + public BasicFormatter getFormatter() { return formatter; } @@ -81,7 +81,7 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) { return false; } - CliFormatterCursor that = (CliFormatterCursor) o; + TextFormatterCursor that = (TextFormatterCursor) o; return Objects.equals(delegate, that.delegate) && Objects.equals(formatter, that.formatter); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java index f8d0393303d64..25989ab0af7d2 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/Cursors.java @@ -19,7 +19,7 @@ import org.elasticsearch.xpack.sql.execution.search.extractor.HitExtractors; import org.elasticsearch.xpack.sql.expression.function.scalar.Processors; import org.elasticsearch.xpack.sql.expression.literal.Intervals; -import org.elasticsearch.xpack.sql.plugin.CliFormatterCursor; +import org.elasticsearch.xpack.sql.plugin.TextFormatterCursor; import java.io.ByteArrayOutputStream; import java.io.OutputStream; @@ -47,7 +47,7 @@ public static List getNamedWriteables() { entries.add(new NamedWriteableRegistry.Entry(Cursor.class, EmptyCursor.NAME, in -> Cursor.EMPTY)); entries.add(new NamedWriteableRegistry.Entry(Cursor.class, ScrollCursor.NAME, ScrollCursor::new)); entries.add(new NamedWriteableRegistry.Entry(Cursor.class, CompositeAggregationCursor.NAME, CompositeAggregationCursor::new)); - entries.add(new NamedWriteableRegistry.Entry(Cursor.class, CliFormatterCursor.NAME, CliFormatterCursor::new)); + entries.add(new NamedWriteableRegistry.Entry(Cursor.class, TextFormatterCursor.NAME, TextFormatterCursor::new)); // plus all their dependencies entries.addAll(Processors.getNamedWriteables()); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/CliFormatterTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/BasicFormatterTests.java similarity index 63% rename from x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/CliFormatterTests.java rename to x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/BasicFormatterTests.java index e1b551d2aeddb..fcab7eedca801 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/CliFormatterTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/BasicFormatterTests.java @@ -6,28 +6,32 @@ package org.elasticsearch.xpack.sql.action; import 
org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.sql.action.BasicFormatter.FormatOption; import org.elasticsearch.xpack.sql.proto.ColumnInfo; import org.elasticsearch.xpack.sql.proto.Mode; import java.util.Arrays; +import static org.elasticsearch.xpack.sql.action.BasicFormatter.FormatOption.CLI; import static org.hamcrest.Matchers.arrayWithSize; -public class CliFormatterTests extends ESTestCase { - private final SqlQueryResponse firstResponse = new SqlQueryResponse("", Mode.PLAIN, +public class BasicFormatterTests extends ESTestCase { + private final FormatOption format = randomFrom(FormatOption.values()); + private final SqlQueryResponse firstResponse = new SqlQueryResponse("", format == CLI ? Mode.CLI : Mode.PLAIN, Arrays.asList( new ColumnInfo("", "foo", "string", 0), new ColumnInfo("", "bar", "long", 15), new ColumnInfo("", "15charwidename!", "double", 25), new ColumnInfo("", "superduperwidename!!!", "double", 25), - new ColumnInfo("", "baz", "keyword", 0)), + new ColumnInfo("", "baz", "keyword", 0), + new ColumnInfo("", "date", "datetime", 24)), Arrays.asList( - Arrays.asList("15charwidedata!", 1, 6.888, 12, "rabbit"), - Arrays.asList("dog", 1.7976931348623157E308, 123124.888, 9912, "goat"))); - private final CliFormatter formatter = new CliFormatter(firstResponse.columns(), firstResponse.rows()); + Arrays.asList("15charwidedata!", 1, 6.888, 12, "rabbit", "1953-09-02T00:00:00.000Z"), + Arrays.asList("dog", 1.7976931348623157E308, 123124.888, 9912, "goat", "2000-03-15T21:34:37.443Z"))); + private final BasicFormatter formatter = new BasicFormatter(firstResponse.columns(), firstResponse.rows(), format); /** - * Tests for {@link CliFormatter#formatWithHeader}, values + * Tests for {@link BasicFormatter#formatWithHeader}, values * of exactly the minimum column size, column names of exactly * the minimum column size, column headers longer than the * minimum column size, and values longer than the minimum @@ -36,24 +40,30 @@ public class CliFormatterTests extends ESTestCase { public void testFormatWithHeader() { String[] result = formatter.formatWithHeader(firstResponse.columns(), firstResponse.rows()).split("\n"); assertThat(result, arrayWithSize(4)); - assertEquals(" foo | bar |15charwidename!|superduperwidename!!!| baz ", result[0]); - assertEquals("---------------+----------------------+---------------+---------------------+---------------", result[1]); - assertEquals("15charwidedata!|1 |6.888 |12 |rabbit ", result[2]); - assertEquals("dog |1.7976931348623157E308|123124.888 |9912 |goat ", result[3]); + assertEquals(" foo | bar |15charwidename!|superduperwidename!!!| baz |" + + " date ", result[0]); + assertEquals("---------------+----------------------+---------------+---------------------+---------------+" + + "------------------------", result[1]); + assertEquals("15charwidedata!|1 |6.888 |12 |rabbit |" + + "1953-09-02T00:00:00.000Z", result[2]); + assertEquals("dog |1.7976931348623157E308|123124.888 |9912 |goat |" + + "2000-03-15T21:34:37.443Z", result[3]); } /** - * Tests for {@link CliFormatter#formatWithoutHeader} and + * Tests for {@link BasicFormatter#formatWithoutHeader} and * truncation of long columns. 
*/ public void testFormatWithoutHeader() { String[] result = formatter.formatWithoutHeader( Arrays.asList( - Arrays.asList("ohnotruncateddata", 4, 1, 77, "wombat"), - Arrays.asList("dog", 2, 123124.888, 9912, "goat"))).split("\n"); + Arrays.asList("ohnotruncateddata", 4, 1, 77, "wombat", "1955-01-21T01:02:03.342Z"), + Arrays.asList("dog", 2, 123124.888, 9912, "goat", "2231-12-31T23:59:59.999Z"))).split("\n"); assertThat(result, arrayWithSize(2)); - assertEquals("ohnotruncatedd~|4 |1 |77 |wombat ", result[0]); - assertEquals("dog |2 |123124.888 |9912 |goat ", result[1]); + assertEquals("ohnotruncatedd~|4 |1 |77 |wombat |" + + "1955-01-21T01:02:03.342Z", result[0]); + assertEquals("dog |2 |123124.888 |9912 |goat |" + + "2231-12-31T23:59:59.999Z", result[1]); } /** diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CursorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CursorTests.java index f3e27d852b3fe..67b0f2badd883 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CursorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/CursorTests.java @@ -13,9 +13,9 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.sql.SqlException; import org.elasticsearch.xpack.sql.TestUtils; -import org.elasticsearch.xpack.sql.action.CliFormatter; +import org.elasticsearch.xpack.sql.action.BasicFormatter; import org.elasticsearch.xpack.sql.action.SqlQueryResponse; -import org.elasticsearch.xpack.sql.plugin.CliFormatterCursor; +import org.elasticsearch.xpack.sql.plugin.TextFormatterCursor; import org.elasticsearch.xpack.sql.proto.ColumnInfo; import org.elasticsearch.xpack.sql.proto.Mode; import org.elasticsearch.xpack.sql.session.Cursor; @@ -32,6 +32,8 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyZeroInteractions; +import static org.elasticsearch.xpack.sql.action.BasicFormatter.FormatOption.CLI; +import static org.elasticsearch.xpack.sql.action.BasicFormatter.FormatOption.TEXT; public class CursorTests extends ESTestCase { @@ -79,12 +81,20 @@ static Cursor randomNonEmptyCursor() { () -> { SqlQueryResponse response = createRandomSqlResponse(); if (response.columns() != null && response.rows() != null) { - return CliFormatterCursor.wrap(ScrollCursorTests.randomScrollCursor(), - new CliFormatter(response.columns(), response.rows())); + return TextFormatterCursor.wrap(ScrollCursorTests.randomScrollCursor(), + new BasicFormatter(response.columns(), response.rows(), CLI)); + } else { + return ScrollCursorTests.randomScrollCursor(); + } + }, + () -> { + SqlQueryResponse response = createRandomSqlResponse(); + if (response.columns() != null && response.rows() != null) { + return TextFormatterCursor.wrap(ScrollCursorTests.randomScrollCursor(), + new BasicFormatter(response.columns(), response.rows(), TEXT)); } else { return ScrollCursorTests.randomScrollCursor(); } - } ); return cursorSupplier.get(); From 34f2d2ec916909e107a85c26b1cccdda3026aac9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Tue, 22 Jan 2019 15:13:52 +0100 Subject: [PATCH 11/39] Remove remaining occurrences of "include_type_name=true" in docs (#37646) --- .../metrics/geocentroid-aggregation.asciidoc | 12 +- .../analysis/analyzers/configuring.asciidoc | 20 +- .../pattern-replace-charfilter.asciidoc | 14 +- .../tokenizers/edgengram-tokenizer.asciidoc | 16 +-
docs/reference/docs/termvectors.asciidoc | 26 +- docs/reference/how-to/disk-usage.asciidoc | 70 +++--- .../how-to/recipes/stemming.asciidoc | 20 +- docs/reference/how-to/search-speed.asciidoc | 48 ++-- .../index-modules/index-sorting.asciidoc | 38 ++- .../index-modules/similarity.asciidoc | 28 +-- .../ingest/processors/geoip.asciidoc | 12 +- .../mapping/dynamic/field-mapping.asciidoc | 18 +- .../mapping/dynamic/templates.asciidoc | 238 ++++++++---------- .../mapping/fields/field-names-field.asciidoc | 8 +- .../mapping/fields/meta-field.asciidoc | 14 +- .../mapping/fields/routing-field.asciidoc | 8 +- .../mapping/fields/source-field.asciidoc | 30 +-- .../mapping/params/analyzer.asciidoc | 34 ++- docs/reference/mapping/params/boost.asciidoc | 18 +- docs/reference/mapping/params/coerce.asciidoc | 36 ++- .../reference/mapping/params/copy-to.asciidoc | 26 +- .../mapping/params/doc-values.asciidoc | 18 +- .../reference/mapping/params/dynamic.asciidoc | 24 +- .../reference/mapping/params/enabled.asciidoc | 30 +-- .../mapping/params/fielddata.asciidoc | 36 ++- docs/reference/mapping/params/format.asciidoc | 12 +- .../mapping/params/ignore-above.asciidoc | 12 +- .../mapping/params/ignore-malformed.asciidoc | 36 ++- .../mapping/params/index-options.asciidoc | 12 +- .../mapping/params/index-prefixes.asciidoc | 28 +-- .../mapping/params/multi-fields.asciidoc | 34 ++- .../mapping/params/normalizer.asciidoc | 12 +- .../mapping/params/null-value.asciidoc | 12 +- .../params/position-increment-gap.asciidoc | 12 +- .../mapping/params/properties.asciidoc | 30 ++- .../mapping/params/search-analyzer.asciidoc | 14 +- .../mapping/params/similarity.asciidoc | 18 +- docs/reference/mapping/params/store.asciidoc | 26 +- .../mapping/params/term-vector.asciidoc | 12 +- docs/reference/mapping/types/alias.asciidoc | 24 +- docs/reference/mapping/types/binary.asciidoc | 16 +- docs/reference/mapping/types/boolean.asciidoc | 10 +- docs/reference/mapping/types/date.asciidoc | 22 +- .../mapping/types/dense-vector.asciidoc | 16 +- .../mapping/types/feature-vector.asciidoc | 10 +- docs/reference/mapping/types/feature.asciidoc | 18 +- .../mapping/types/geo-point.asciidoc | 10 +- docs/reference/mapping/types/ip.asciidoc | 10 +- docs/reference/mapping/types/keyword.asciidoc | 10 +- docs/reference/mapping/types/nested.asciidoc | 10 +- docs/reference/mapping/types/numeric.asciidoc | 24 +- docs/reference/mapping/types/object.asciidoc | 28 +-- .../mapping/types/parent-join.asciidoc | 62 ++--- .../mapping/types/percolator.asciidoc | 118 ++++----- docs/reference/mapping/types/range.asciidoc | 18 +- .../mapping/types/sparse-vector.asciidoc | 16 +- docs/reference/mapping/types/text.asciidoc | 10 +- .../mapping/types/token-count.asciidoc | 18 +- docs/reference/ml/transforms.asciidoc | 80 +++--- .../reference/query-dsl/exists-query.asciidoc | 12 +- .../query-dsl/feature-query.asciidoc | 24 +- .../query-dsl/geo-bounding-box-query.asciidoc | 14 +- .../query-dsl/geo-distance-query.asciidoc | 14 +- .../query-dsl/geo-shape-query.asciidoc | 20 +- .../reference/query-dsl/nested-query.asciidoc | 10 +- .../query-dsl/parent-id-query.asciidoc | 14 +- .../query-dsl/percolate-query.asciidoc | 18 +- docs/reference/query-dsl/term-query.asciidoc | 16 +- .../query-dsl/terms-set-query.asciidoc | 10 +- .../request/highlighters-internal.asciidoc | 12 +- .../search/request/post-filter.asciidoc | 12 +- docs/reference/search/request/sort.asciidoc | 22 +- .../suggesters/completion-suggest.asciidoc | 16 +- .../suggesters/context-suggest.asciidoc | 74 +++--- 
.../search/suggesters/phrase-suggest.asciidoc | 26 +- .../authorization/alias-privileges.asciidoc | 2 +- 76 files changed, 867 insertions(+), 1091 deletions(-) diff --git a/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc b/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc index 0ccd58e0dc63c..af3274c1c09fe 100644 --- a/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/geocentroid-aggregation.asciidoc @@ -7,20 +7,18 @@ Example: [source,js] -------------------------------------------------- -PUT /museums?include_type_name=true +PUT /museums { "mappings": { - "_doc": { - "properties": { - "location": { - "type": "geo_point" - } + "properties": { + "location": { + "type": "geo_point" } } } } -POST /museums/_doc/_bulk?refresh +POST /museums/_bulk?refresh {"index":{"_id":1}} {"location": "52.374081,4.912350", "city": "Amsterdam", "name": "NEMO Science Museum"} {"index":{"_id":2}} diff --git a/docs/reference/analysis/analyzers/configuring.asciidoc b/docs/reference/analysis/analyzers/configuring.asciidoc index f010f2ad6e9b1..994842508e8f5 100644 --- a/docs/reference/analysis/analyzers/configuring.asciidoc +++ b/docs/reference/analysis/analyzers/configuring.asciidoc @@ -8,7 +8,7 @@ to support a list of stop words: [source,js] -------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "settings": { "analysis": { @@ -21,16 +21,14 @@ PUT my_index?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "my_text": { - "type": "text", - "analyzer": "standard", <2> - "fields": { - "english": { - "type": "text", - "analyzer": "std_english" <3> - } + "properties": { + "my_text": { + "type": "text", + "analyzer": "standard", <2> + "fields": { + "english": { + "type": "text", + "analyzer": "std_english" <3> } } } diff --git a/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc b/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc index adfde27138af8..7386af902fbcc 100644 --- a/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc +++ b/docs/reference/analysis/charfilters/pattern-replace-charfilter.asciidoc @@ -49,7 +49,7 @@ replace any embedded dashes in numbers with underscores, i.e `123-456-789` -> [source,js] ---------------------------- -PUT my_index?include_type_name=true +PUT my_index { "settings": { "analysis": { @@ -100,7 +100,7 @@ camelCase words to be queried individually: [source,js] ---------------------------- -PUT my_index?include_type_name=true +PUT my_index { "settings": { "analysis": { @@ -125,12 +125,10 @@ PUT my_index?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "text": { - "type": "text", - "analyzer": "my_analyzer" - } + "properties": { + "text": { + "type": "text", + "analyzer": "my_analyzer" } } } diff --git a/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc b/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc index 14f94ff21d3dc..2e3ae8b036c3f 100644 --- a/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc +++ b/docs/reference/analysis/tokenizers/edgengram-tokenizer.asciidoc @@ -104,7 +104,7 @@ length `10`: [source,js] ---------------------------- -PUT my_index?include_type_name=true +PUT my_index { "settings": { "analysis": { @@ -222,7 +222,7 @@ Below is an example of how to set up a field for _search-as-you-type_: [source,js] ----------------------------------- -PUT my_index?include_type_name=true +PUT my_index { 
"settings": { "analysis": { @@ -250,13 +250,11 @@ PUT my_index?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "title": { - "type": "text", - "analyzer": "autocomplete", - "search_analyzer": "autocomplete_search" - } + "properties": { + "title": { + "type": "text", + "analyzer": "autocomplete", + "search_analyzer": "autocomplete_search" } } } diff --git a/docs/reference/docs/termvectors.asciidoc b/docs/reference/docs/termvectors.asciidoc index d6227b3c36202..1e2748916005c 100644 --- a/docs/reference/docs/termvectors.asciidoc +++ b/docs/reference/docs/termvectors.asciidoc @@ -125,21 +125,19 @@ First, we create an index that stores term vectors, payloads etc. : [source,js] -------------------------------------------------- -PUT /twitter?include_type_name=true +PUT /twitter { "mappings": { - "_doc": { - "properties": { - "text": { - "type": "text", - "term_vector": "with_positions_offsets_payloads", - "store" : true, - "analyzer" : "fulltext_analyzer" - }, - "fullname": { - "type": "text", - "term_vector": "with_positions_offsets_payloads", - "analyzer" : "fulltext_analyzer" - } + "properties": { + "text": { + "type": "text", + "term_vector": "with_positions_offsets_payloads", + "store" : true, + "analyzer" : "fulltext_analyzer" + }, + "fullname": { + "type": "text", + "term_vector": "with_positions_offsets_payloads", + "analyzer" : "fulltext_analyzer" } } }, diff --git a/docs/reference/how-to/disk-usage.asciidoc b/docs/reference/how-to/disk-usage.asciidoc index d58233a7e01cb..713e03d188ad9 100644 --- a/docs/reference/how-to/disk-usage.asciidoc +++ b/docs/reference/how-to/disk-usage.asciidoc @@ -12,15 +12,13 @@ filter on, you can safely disable indexing on this field in your [source,js] -------------------------------------------------- -PUT index?include_type_name=true +PUT index { "mappings": { - "_doc": { - "properties": { - "foo": { - "type": "integer", - "index": false - } + "properties": { + "foo": { + "type": "integer", + "index": false } } } @@ -35,15 +33,13 @@ to not write norms to the index: [source,js] -------------------------------------------------- -PUT index?include_type_name=true +PUT index { "mappings": { - "_doc": { - "properties": { - "foo": { - "type": "text", - "norms": false - } + "properties": { + "foo": { + "type": "text", + "norms": false } } } @@ -58,15 +54,13 @@ Elasticsearch to not index positions: [source,js] -------------------------------------------------- -PUT index?include_type_name=true +PUT index { "mappings": { - "_doc": { - "properties": { - "foo": { - "type": "text", - "index_options": "freqs" - } + "properties": { + "foo": { + "type": "text", + "index_options": "freqs" } } } @@ -81,16 +75,14 @@ and scoring will assume that terms appear only once in every document. 
[source,js] -------------------------------------------------- -PUT index?include_type_name=true +PUT index { "mappings": { - "_doc": { - "properties": { - "foo": { - "type": "text", - "norms": false, - "index_options": "freqs" - } + "properties": { + "foo": { + "type": "text", + "norms": false, + "index_options": "freqs" } } } @@ -115,21 +107,19 @@ fields as `keyword`: [source,js] -------------------------------------------------- -PUT index?include_type_name=true +PUT index { "mappings": { - "_doc": { - "dynamic_templates": [ - { - "strings": { - "match_mapping_type": "string", - "mapping": { - "type": "keyword" - } + "dynamic_templates": [ + { + "strings": { + "match_mapping_type": "string", + "mapping": { + "type": "keyword" } } - ] - } + } + ] } } -------------------------------------------------- diff --git a/docs/reference/how-to/recipes/stemming.asciidoc b/docs/reference/how-to/recipes/stemming.asciidoc index c0ec3f2a04c09..e8c213646578c 100644 --- a/docs/reference/how-to/recipes/stemming.asciidoc +++ b/docs/reference/how-to/recipes/stemming.asciidoc @@ -9,7 +9,7 @@ content indexed in two different ways: [source,js] -------------------------------------------------- -PUT index?include_type_name=true +PUT index { "settings": { "analysis": { @@ -24,16 +24,14 @@ PUT index?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "body": { - "type": "text", - "analyzer": "english", - "fields": { - "exact": { - "type": "text", - "analyzer": "english_exact" - } + "properties": { + "body": { + "type": "text", + "analyzer": "english", + "fields": { + "exact": { + "type": "text", + "analyzer": "english_exact" } } } diff --git a/docs/reference/how-to/search-speed.asciidoc b/docs/reference/how-to/search-speed.asciidoc index 4ab408be935ac..b136c953b8f02 100644 --- a/docs/reference/how-to/search-speed.asciidoc +++ b/docs/reference/how-to/search-speed.asciidoc @@ -50,22 +50,20 @@ field. 
[source,js] -------------------------------------------------- -PUT movies?include_type_name=true +PUT movies { "mappings": { - "_doc": { - "properties": { - "name_and_plot": { - "type": "text" - }, - "name": { - "type": "text", - "copy_to": "name_and_plot" - }, - "plot": { - "type": "text", - "copy_to": "name_and_plot" - } + "properties": { + "name_and_plot": { + "type": "text" + }, + "name": { + "type": "text", + "copy_to": "name_and_plot" + }, + "plot": { + "type": "text", + "copy_to": "name_and_plot" } } } @@ -123,14 +121,12 @@ should be mapped as a <>: [source,js] -------------------------------------------------- -PUT index?include_type_name=true +PUT index { "mappings": { - "_doc": { - "properties": { - "price_range": { - "type": "keyword" - } + "properties": { + "price_range": { + "type": "keyword" } } } @@ -322,15 +318,13 @@ eagerly at refresh-time by configuring mappings as described below: [source,js] -------------------------------------------------- -PUT index?include_type_name=true +PUT index { "mappings": { - "_doc": { - "properties": { - "foo": { - "type": "keyword", - "eager_global_ordinals": true - } + "properties": { + "foo": { + "type": "keyword", + "eager_global_ordinals": true } } } diff --git a/docs/reference/index-modules/index-sorting.asciidoc b/docs/reference/index-modules/index-sorting.asciidoc index 1a0f8c65dc98b..b4648dd256d3c 100644 --- a/docs/reference/index-modules/index-sorting.asciidoc +++ b/docs/reference/index-modules/index-sorting.asciidoc @@ -14,7 +14,7 @@ For instance the following example shows how to define a sort on a single field: [source,js] -------------------------------------------------- -PUT twitter?include_type_name=true +PUT twitter { "settings" : { "index" : { @@ -23,11 +23,9 @@ PUT twitter?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "date": { - "type": "date" - } + "properties": { + "date": { + "type": "date" } } } @@ -42,7 +40,7 @@ It is also possible to sort the index by more than one field: [source,js] -------------------------------------------------- -PUT twitter?include_type_name=true +PUT twitter { "settings" : { "index" : { @@ -51,15 +49,13 @@ PUT twitter?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "username": { - "type": "keyword", - "doc_values": true - }, - "date": { - "type": "date" - } + "properties": { + "username": { + "type": "keyword", + "doc_values": true + }, + "date": { + "type": "date" } } } @@ -118,7 +114,7 @@ For example, let's say we have an index that contains events sorted by a timesta [source,js] -------------------------------------------------- -PUT events?include_type_name=true +PUT events { "settings" : { "index" : { @@ -127,11 +123,9 @@ PUT events?include_type_name=true } }, "mappings": { - "doc": { - "properties": { - "timestamp": { - "type": "date" - } + "properties": { + "timestamp": { + "type": "date" } } } diff --git a/docs/reference/index-modules/similarity.asciidoc b/docs/reference/index-modules/similarity.asciidoc index 4d2404c3b52b0..014923d463cbd 100644 --- a/docs/reference/index-modules/similarity.asciidoc +++ b/docs/reference/index-modules/similarity.asciidoc @@ -20,7 +20,7 @@ settings. 
[source,js] -------------------------------------------------- -PUT /index?include_type_name=true +PUT /index { "settings" : { "index" : { @@ -200,7 +200,7 @@ TF-IDF: [source,js] -------------------------------------------------- -PUT /index?include_type_name=true +PUT /index { "settings": { "number_of_shards": 1, @@ -214,12 +214,10 @@ PUT /index?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "field": { - "type": "text", - "similarity": "scripted_tfidf" - } + "properties": { + "field": { + "type": "text", + "similarity": "scripted_tfidf" } } } @@ -369,7 +367,7 @@ more efficient: [source,js] -------------------------------------------------- -PUT /index?include_type_name=true +PUT /index { "settings": { "number_of_shards": 1, @@ -386,12 +384,10 @@ PUT /index?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "field": { - "type": "text", - "similarity": "scripted_tfidf" - } + "properties": { + "field": { + "type": "text", + "similarity": "scripted_tfidf" } } } @@ -537,7 +533,7 @@ it is <>: [source,js] -------------------------------------------------- -PUT /index?include_type_name=true +PUT /index { "settings": { "index": { diff --git a/docs/reference/ingest/processors/geoip.asciidoc b/docs/reference/ingest/processors/geoip.asciidoc index f9f902395c4b2..f38e62806bb9d 100644 --- a/docs/reference/ingest/processors/geoip.asciidoc +++ b/docs/reference/ingest/processors/geoip.asciidoc @@ -196,15 +196,13 @@ You can use the following mapping for the example index above: [source,js] -------------------------------------------------- -PUT my_ip_locations?include_type_name=true +PUT my_ip_locations { "mappings": { - "_doc": { - "properties": { - "geoip": { - "properties": { - "location": { "type": "geo_point" } - } + "properties": { + "geoip": { + "properties": { + "location": { "type": "geo_point" } } } } diff --git a/docs/reference/mapping/dynamic/field-mapping.asciidoc b/docs/reference/mapping/dynamic/field-mapping.asciidoc index c7a96a33473e5..735fddf3f84ab 100644 --- a/docs/reference/mapping/dynamic/field-mapping.asciidoc +++ b/docs/reference/mapping/dynamic/field-mapping.asciidoc @@ -66,12 +66,10 @@ Dynamic date detection can be disabled by setting `date_detection` to `false`: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "date_detection": false - } + "date_detection": false } } @@ -91,12 +89,10 @@ own <>: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "dynamic_date_formats": ["MM/dd/yyyy"] - } + "dynamic_date_formats": ["MM/dd/yyyy"] } } @@ -119,12 +115,10 @@ correct solution is to map these fields explicitly, but numeric detection [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "numeric_detection": true - } + "numeric_detection": true } } diff --git a/docs/reference/mapping/dynamic/templates.asciidoc b/docs/reference/mapping/dynamic/templates.asciidoc index f10a9d3475b66..8598eab412e79 100644 --- a/docs/reference/mapping/dynamic/templates.asciidoc +++ b/docs/reference/mapping/dynamic/templates.asciidoc @@ -69,35 +69,33 @@ could use the following template: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "dynamic_templates": [ - { - "integers": { - 
"match_mapping_type": "long", - "mapping": { - "type": "integer" - } + "dynamic_templates": [ + { + "integers": { + "match_mapping_type": "long", + "mapping": { + "type": "integer" } - }, - { - "strings": { - "match_mapping_type": "string", - "mapping": { - "type": "text", - "fields": { - "raw": { - "type": "keyword", - "ignore_above": 256 - } + } + }, + { + "strings": { + "match_mapping_type": "string", + "mapping": { + "type": "text", + "fields": { + "raw": { + "type": "keyword", + "ignore_above": 256 } } } } - ] - } + } + ] } } @@ -125,23 +123,21 @@ fields: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "dynamic_templates": [ - { - "longs_as_strings": { - "match_mapping_type": "string", - "match": "long_*", - "unmatch": "*_text", - "mapping": { - "type": "long" - } + "dynamic_templates": [ + { + "longs_as_strings": { + "match_mapping_type": "string", + "match": "long_*", + "unmatch": "*_text", + "mapping": { + "type": "long" } } - ] - } + } + ] } } @@ -181,23 +177,21 @@ top-level `full_name` field, except for the `middle` field: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "dynamic_templates": [ - { - "full_name": { - "path_match": "name.*", - "path_unmatch": "*.middle", - "mapping": { - "type": "text", - "copy_to": "full_name" - } + "dynamic_templates": [ + { + "full_name": { + "path_match": "name.*", + "path_unmatch": "*.middle", + "mapping": { + "type": "text", + "copy_to": "full_name" } } - ] - } + } + ] } } @@ -222,32 +216,30 @@ field, and disables <> for all non-string fields: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "dynamic_templates": [ - { - "named_analyzers": { - "match_mapping_type": "string", - "match": "*", - "mapping": { - "type": "text", - "analyzer": "{name}" - } + "dynamic_templates": [ + { + "named_analyzers": { + "match_mapping_type": "string", + "match": "*", + "mapping": { + "type": "text", + "analyzer": "{name}" } - }, - { - "no_doc_values": { - "match_mapping_type":"*", - "mapping": { - "type": "{dynamic_type}", - "doc_values": false - } + } + }, + { + "no_doc_values": { + "match_mapping_type":"*", + "mapping": { + "type": "{dynamic_type}", + "doc_values": false } } - ] - } + } + ] } } @@ -276,21 +268,19 @@ you will have to search on the exact same value that was indexed. 
[source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "dynamic_templates": [ - { - "strings_as_keywords": { - "match_mapping_type": "string", - "mapping": { - "type": "keyword" - } + "dynamic_templates": [ + { + "strings_as_keywords": { + "match_mapping_type": "string", + "mapping": { + "type": "keyword" } } - ] - } + } + ] } } -------------------------------------------------- @@ -306,21 +296,19 @@ before 5.0): [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "dynamic_templates": [ - { - "strings_as_text": { - "match_mapping_type": "string", - "mapping": { - "type": "text" - } + "dynamic_templates": [ + { + "strings_as_text": { + "match_mapping_type": "string", + "mapping": { + "type": "text" } } - ] - } + } + ] } } -------------------------------------------------- @@ -334,28 +322,26 @@ disable the storage of these scoring factors in the index and save some space. [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "dynamic_templates": [ - { - "strings_as_keywords": { - "match_mapping_type": "string", - "mapping": { - "type": "text", - "norms": false, - "fields": { - "keyword": { - "type": "keyword", - "ignore_above": 256 - } + "dynamic_templates": [ + { + "strings_as_keywords": { + "match_mapping_type": "string", + "mapping": { + "type": "text", + "norms": false, + "fields": { + "keyword": { + "type": "keyword", + "ignore_above": 256 } } } } - ] - } + } + ] } } -------------------------------------------------- @@ -375,31 +361,29 @@ maybe gain some indexing speed: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "dynamic_templates": [ - { - "unindexed_longs": { - "match_mapping_type": "long", - "mapping": { - "type": "long", - "index": false - } + "dynamic_templates": [ + { + "unindexed_longs": { + "match_mapping_type": "long", + "mapping": { + "type": "long", + "index": false } - }, - { - "unindexed_doubles": { - "match_mapping_type": "double", - "mapping": { - "type": "float", <1> - "index": false - } + } + }, + { + "unindexed_doubles": { + "match_mapping_type": "double", + "mapping": { + "type": "float", <1> + "index": false } } - ] - } + } + ] } } -------------------------------------------------- diff --git a/docs/reference/mapping/fields/field-names-field.asciidoc b/docs/reference/mapping/fields/field-names-field.asciidoc index acc401cf0ec78..c455c55f5ea7f 100644 --- a/docs/reference/mapping/fields/field-names-field.asciidoc +++ b/docs/reference/mapping/fields/field-names-field.asciidoc @@ -21,13 +21,11 @@ execute `exists` queries using those fields you might want to disable [source,js] -------------------------------------------------- -PUT tweets?include_type_name=true +PUT tweets { "mappings": { - "_doc": { - "_field_names": { - "enabled": false - } + "_field_names": { + "enabled": false } } } diff --git a/docs/reference/mapping/fields/meta-field.asciidoc b/docs/reference/mapping/fields/meta-field.asciidoc index bdfa7d94d2c0c..b2225dba4e810 100644 --- a/docs/reference/mapping/fields/meta-field.asciidoc +++ b/docs/reference/mapping/fields/meta-field.asciidoc @@ -7,16 +7,14 @@ metadata, such as the class that a document belongs to: [source,js] -------------------------------------------------- -PUT 
my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "_meta": { <1> - "class": "MyApp::User", - "version": { - "min": "1.0", - "max": "1.3" - } + "_meta": { <1> + "class": "MyApp::User", + "version": { + "min": "1.0", + "max": "1.3" } } } diff --git a/docs/reference/mapping/fields/routing-field.asciidoc b/docs/reference/mapping/fields/routing-field.asciidoc index ca823b2430b12..6c75b91b522a9 100644 --- a/docs/reference/mapping/fields/routing-field.asciidoc +++ b/docs/reference/mapping/fields/routing-field.asciidoc @@ -79,13 +79,11 @@ custom `routing` value required for all CRUD operations: [source,js] ------------------------------ -PUT my_index2?include_type_name=true +PUT my_index2 { "mappings": { - "_doc": { - "_routing": { - "required": true <1> - } + "_routing": { + "required": true <1> } } } diff --git a/docs/reference/mapping/fields/source-field.asciidoc b/docs/reference/mapping/fields/source-field.asciidoc index 8f7943ebc0ff6..757fc0fa5b662 100644 --- a/docs/reference/mapping/fields/source-field.asciidoc +++ b/docs/reference/mapping/fields/source-field.asciidoc @@ -13,13 +13,11 @@ within the index. For this reason, it can be disabled as follows: [source,js] -------------------------------------------------- -PUT tweets?include_type_name=true +PUT tweets { "mappings": { - "_doc": { - "_source": { - "enabled": false - } + "_source": { + "enabled": false } } } @@ -85,20 +83,18 @@ as follows: [source,js] -------------------------------------------------- -PUT logs?include_type_name=true +PUT logs { "mappings": { - "_doc": { - "_source": { - "includes": [ - "*.count", - "meta.*" - ], - "excludes": [ - "meta.description", - "meta.other.*" - ] - } + "_source": { + "includes": [ + "*.count", + "meta.*" + ], + "excludes": [ + "meta.description", + "meta.other.*" + ] } } } diff --git a/docs/reference/mapping/params/analyzer.asciidoc b/docs/reference/mapping/params/analyzer.asciidoc index 43cfb7082796f..b2652bf999a77 100644 --- a/docs/reference/mapping/params/analyzer.asciidoc +++ b/docs/reference/mapping/params/analyzer.asciidoc @@ -41,18 +41,16 @@ in the field mapping, as follows: [source,js] -------------------------------------------------- -PUT /my_index?include_type_name=true +PUT /my_index { "mappings": { - "_doc": { - "properties": { - "text": { <1> - "type": "text", - "fields": { - "english": { <2> - "type": "text", - "analyzer": "english" - } + "properties": { + "text": { <1> + "type": "text", + "fields": { + "english": { <2> + "type": "text", + "analyzer": "english" } } } @@ -93,7 +91,7 @@ To disable stop words for phrases a field utilising three analyzer settings will [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "settings":{ "analysis":{ @@ -123,14 +121,12 @@ PUT my_index?include_type_name=true } }, "mappings":{ - "_doc":{ - "properties":{ - "title": { - "type":"text", - "analyzer":"my_analyzer", <3> - "search_analyzer":"my_stop_analyzer", <4> - "search_quote_analyzer":"my_analyzer" <5> - } + "properties":{ + "title": { + "type":"text", + "analyzer":"my_analyzer", <3> + "search_analyzer":"my_stop_analyzer", <4> + "search_quote_analyzer":"my_analyzer" <5> } } } diff --git a/docs/reference/mapping/params/boost.asciidoc b/docs/reference/mapping/params/boost.asciidoc index c6f6d104c0ca0..7da03a66ac44e 100644 --- a/docs/reference/mapping/params/boost.asciidoc +++ b/docs/reference/mapping/params/boost.asciidoc @@ -6,18 +6,16 @@ Individual fields can be _boosted_ automatically -- count more 
towards the relev [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "title": { - "type": "text", - "boost": 2 <1> - }, - "content": { - "type": "text" - } + "properties": { + "title": { + "type": "text", + "boost": 2 <1> + }, + "content": { + "type": "text" } } } diff --git a/docs/reference/mapping/params/coerce.asciidoc b/docs/reference/mapping/params/coerce.asciidoc index dbb7ca84ba6fd..55f31262351fd 100644 --- a/docs/reference/mapping/params/coerce.asciidoc +++ b/docs/reference/mapping/params/coerce.asciidoc @@ -17,18 +17,16 @@ For instance: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "number_one": { - "type": "integer" - }, - "number_two": { - "type": "integer", - "coerce": false - } + "properties": { + "number_one": { + "type": "integer" + }, + "number_two": { + "type": "integer", + "coerce": false } } } @@ -61,21 +59,19 @@ coercion globally across all mapping types: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "settings": { "index.mapping.coerce": false }, "mappings": { - "_doc": { - "properties": { - "number_one": { - "type": "integer", - "coerce": true - }, - "number_two": { - "type": "integer" - } + "properties": { + "number_one": { + "type": "integer", + "coerce": true + }, + "number_two": { + "type": "integer" } } } diff --git a/docs/reference/mapping/params/copy-to.asciidoc b/docs/reference/mapping/params/copy-to.asciidoc index d56258aa73324..1796b31360aed 100644 --- a/docs/reference/mapping/params/copy-to.asciidoc +++ b/docs/reference/mapping/params/copy-to.asciidoc @@ -8,22 +8,20 @@ the `full_name` field as follows: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "first_name": { - "type": "text", - "copy_to": "full_name" <1> - }, - "last_name": { - "type": "text", - "copy_to": "full_name" <1> - }, - "full_name": { - "type": "text" - } + "properties": { + "first_name": { + "type": "text", + "copy_to": "full_name" <1> + }, + "last_name": { + "type": "text", + "copy_to": "full_name" <1> + }, + "full_name": { + "type": "text" } } } diff --git a/docs/reference/mapping/params/doc-values.asciidoc b/docs/reference/mapping/params/doc-values.asciidoc index 74efc1bbbfabd..5680cdabba884 100644 --- a/docs/reference/mapping/params/doc-values.asciidoc +++ b/docs/reference/mapping/params/doc-values.asciidoc @@ -23,18 +23,16 @@ value from a script, you can disable doc values in order to save disk space: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "status_code": { <1> - "type": "keyword" - }, - "session_id": { <2> - "type": "keyword", - "doc_values": false - } + "properties": { + "status_code": { <1> + "type": "keyword" + }, + "session_id": { <2> + "type": "keyword", + "doc_values": false } } } diff --git a/docs/reference/mapping/params/dynamic.asciidoc b/docs/reference/mapping/params/dynamic.asciidoc index f7159724f48f4..62d61f5f095a6 100644 --- a/docs/reference/mapping/params/dynamic.asciidoc +++ b/docs/reference/mapping/params/dynamic.asciidoc @@ -58,21 +58,19 @@ object or from the mapping type. 
For instance: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "dynamic": false, <1> - "properties": { - "user": { <2> - "properties": { - "name": { - "type": "text" - }, - "social_networks": { <3> - "dynamic": true, - "properties": {} - } + "dynamic": false, <1> + "properties": { + "user": { <2> + "properties": { + "name": { + "type": "text" + }, + "social_networks": { <3> + "dynamic": true, + "properties": {} } } } diff --git a/docs/reference/mapping/params/enabled.asciidoc b/docs/reference/mapping/params/enabled.asciidoc index fbcd25c716bbf..06b76ddeae006 100644 --- a/docs/reference/mapping/params/enabled.asciidoc +++ b/docs/reference/mapping/params/enabled.asciidoc @@ -15,20 +15,18 @@ in any other way: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "user_id": { - "type": "keyword" - }, - "last_updated": { - "type": "date" - }, - "session_data": { <1> - "enabled": false - } + "properties": { + "user_id": { + "type": "keyword" + }, + "last_updated": { + "type": "date" + }, + "session_data": { <1> + "enabled": false } } } @@ -63,12 +61,10 @@ retrieved, but none of its contents are indexed in any way: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { <1> - "enabled": false - } + "enabled": false <1> } } @@ -88,7 +84,7 @@ GET my_index/_doc/session_1 <2> GET my_index/_mapping <3> -------------------------------------------------- // CONSOLE -<1> The entire `_doc` mapping type is disabled. +<1> The entire mapping type is disabled. <2> The document can be retrieved. <3> Checking the mapping reveals that no fields have been added. 
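Callout <3> above can be made concrete: because the mapping is disabled at the top level, fetching it returns only the `enabled` flag and no field definitions. A minimal sketch of the expected response shape (illustrative only, not part of this patch; the exact response envelope can vary by version):

[source,js]
--------------------------------------------------
{
  "my_index": {
    "mappings": {
      "enabled": false
    }
  }
}
--------------------------------------------------
// NOTCONSOLE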
diff --git a/docs/reference/mapping/params/fielddata.asciidoc b/docs/reference/mapping/params/fielddata.asciidoc index eee7463c6400c..42f02b7ee28ea 100644 --- a/docs/reference/mapping/params/fielddata.asciidoc +++ b/docs/reference/mapping/params/fielddata.asciidoc @@ -55,17 +55,15 @@ enabled for aggregations, as follows: [source,js] --------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "my_field": { <1> - "type": "text", - "fields": { - "keyword": { <2> - "type": "keyword" - } + "properties": { + "my_field": { <1> + "type": "text", + "fields": { + "keyword": { <2> + "type": "keyword" } } } @@ -118,19 +116,17 @@ number of docs that the segment should contain with `min_segment_size`: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "tag": { - "type": "text", - "fielddata": true, - "fielddata_frequency_filter": { - "min": 0.001, - "max": 0.1, - "min_segment_size": 500 - } + "properties": { + "tag": { + "type": "text", + "fielddata": true, + "fielddata_frequency_filter": { + "min": 0.001, + "max": 0.1, + "min_segment_size": 500 } } } diff --git a/docs/reference/mapping/params/format.asciidoc b/docs/reference/mapping/params/format.asciidoc index 641433685d3a2..2be1bdf12d891 100644 --- a/docs/reference/mapping/params/format.asciidoc +++ b/docs/reference/mapping/params/format.asciidoc @@ -11,15 +11,13 @@ Besides the <>, your own [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "date": { - "type": "date", - "format": "yyyy-MM-dd" - } + "properties": { + "date": { + "type": "date", + "format": "yyyy-MM-dd" } } } diff --git a/docs/reference/mapping/params/ignore-above.asciidoc b/docs/reference/mapping/params/ignore-above.asciidoc index 19c275dac5d01..daf5c92bcf34d 100644 --- a/docs/reference/mapping/params/ignore-above.asciidoc +++ b/docs/reference/mapping/params/ignore-above.asciidoc @@ -8,15 +8,13 @@ NOTE: All strings/array elements will still be present in the `_source` field, i [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "message": { - "type": "keyword", - "ignore_above": 20 <1> - } + "properties": { + "message": { + "type": "keyword", + "ignore_above": 20 <1> } } } diff --git a/docs/reference/mapping/params/ignore-malformed.asciidoc b/docs/reference/mapping/params/ignore-malformed.asciidoc index 84a53515d9ebe..30aa6c4e8bc0d 100644 --- a/docs/reference/mapping/params/ignore-malformed.asciidoc +++ b/docs/reference/mapping/params/ignore-malformed.asciidoc @@ -14,18 +14,16 @@ For example: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "number_one": { - "type": "integer", - "ignore_malformed": true - }, - "number_two": { - "type": "integer" - } + "properties": { + "number_one": { + "type": "integer", + "ignore_malformed": true + }, + "number_two": { + "type": "integer" } } } @@ -61,21 +59,19 @@ allow to ignore malformed content globally across all mapping types. 
[source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "settings": { "index.mapping.ignore_malformed": true <1> }, "mappings": { - "_doc": { - "properties": { - "number_one": { <1> - "type": "byte" - }, - "number_two": { - "type": "integer", - "ignore_malformed": false <2> - } + "properties": { + "number_one": { <1> + "type": "byte" + }, + "number_two": { + "type": "integer", + "ignore_malformed": false <2> } } } diff --git a/docs/reference/mapping/params/index-options.asciidoc b/docs/reference/mapping/params/index-options.asciidoc index cda680399dde2..527050a87b29b 100644 --- a/docs/reference/mapping/params/index-options.asciidoc +++ b/docs/reference/mapping/params/index-options.asciidoc @@ -35,15 +35,13 @@ all other fields use `docs` as the default. [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "text": { - "type": "text", - "index_options": "offsets" - } + "properties": { + "text": { + "type": "text", + "index_options": "offsets" } } } diff --git a/docs/reference/mapping/params/index-prefixes.asciidoc b/docs/reference/mapping/params/index-prefixes.asciidoc index baca8606f5c03..841ccdde08776 100644 --- a/docs/reference/mapping/params/index-prefixes.asciidoc +++ b/docs/reference/mapping/params/index-prefixes.asciidoc @@ -19,15 +19,13 @@ This example creates a text field using the default prefix length settings: [source,js] -------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "body_text": { - "type": "text", - "index_prefixes": { } <1> - } + "properties": { + "body_text": { + "type": "text", + "index_prefixes": { } <1> } } } @@ -42,17 +40,15 @@ This example uses custom prefix length settings: [source,js] -------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "full_name": { - "type": "text", - "index_prefixes": { - "min_chars" : 1, - "max_chars" : 10 - } + "properties": { + "full_name": { + "type": "text", + "index_prefixes": { + "min_chars" : 1, + "max_chars" : 10 } } } diff --git a/docs/reference/mapping/params/multi-fields.asciidoc b/docs/reference/mapping/params/multi-fields.asciidoc index e4bdb04506d92..ee1bc02c7fd8d 100644 --- a/docs/reference/mapping/params/multi-fields.asciidoc +++ b/docs/reference/mapping/params/multi-fields.asciidoc @@ -8,17 +8,15 @@ search, and as a `keyword` field for sorting or aggregations: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "city": { - "type": "text", - "fields": { - "raw": { <1> - "type": "keyword" - } + "properties": { + "city": { + "type": "text", + "fields": { + "raw": { <1> + "type": "keyword" } } } @@ -76,18 +74,16 @@ which stems words into their root form: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "text": { <1> - "type": "text", - "fields": { - "english": { <2> - "type": "text", - "analyzer": "english" - } + "properties": { + "text": { <1> + "type": "text", + "fields": { + "english": { <2> + "type": "text", + "analyzer": "english" } } } diff --git a/docs/reference/mapping/params/normalizer.asciidoc b/docs/reference/mapping/params/normalizer.asciidoc index 79ba39e194726..da0298abda228 
100644 --- a/docs/reference/mapping/params/normalizer.asciidoc +++ b/docs/reference/mapping/params/normalizer.asciidoc @@ -12,7 +12,7 @@ such as the <> query. [source,js] -------------------------------- -PUT index?include_type_name=true +PUT index { "settings": { "analysis": { @@ -26,12 +26,10 @@ PUT index?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "foo": { - "type": "keyword", - "normalizer": "my_normalizer" - } + "properties": { + "foo": { + "type": "keyword", + "normalizer": "my_normalizer" } } } diff --git a/docs/reference/mapping/params/null-value.asciidoc b/docs/reference/mapping/params/null-value.asciidoc index b2f46a2c7cef3..0a618ddcac9bc 100644 --- a/docs/reference/mapping/params/null-value.asciidoc +++ b/docs/reference/mapping/params/null-value.asciidoc @@ -10,15 +10,13 @@ the specified value so that it can be indexed and searched. For instance: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "status_code": { - "type": "keyword", - "null_value": "NULL" <1> - } + "properties": { + "status_code": { + "type": "keyword", + "null_value": "NULL" <1> } } } diff --git a/docs/reference/mapping/params/position-increment-gap.asciidoc b/docs/reference/mapping/params/position-increment-gap.asciidoc index ae6cc85865709..853f98ade75b8 100644 --- a/docs/reference/mapping/params/position-increment-gap.asciidoc +++ b/docs/reference/mapping/params/position-increment-gap.asciidoc @@ -51,15 +51,13 @@ The `position_increment_gap` can be specified in the mapping. For instance: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "names": { - "type": "text", - "position_increment_gap": 0 <1> - } + "properties": { + "names": { + "type": "text", + "position_increment_gap": 0 <1> } } } diff --git a/docs/reference/mapping/params/properties.asciidoc b/docs/reference/mapping/params/properties.asciidoc index 2efef0abf3cdc..9837dd3d5d9f4 100644 --- a/docs/reference/mapping/params/properties.asciidoc +++ b/docs/reference/mapping/params/properties.asciidoc @@ -15,23 +15,21 @@ field, and a `nested` field: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { <1> - "properties": { - "manager": { <2> - "properties": { - "age": { "type": "integer" }, - "name": { "type": "text" } - } - }, - "employees": { <3> - "type": "nested", - "properties": { - "age": { "type": "integer" }, - "name": { "type": "text" } - } + "properties": { <1> + "manager": { + "properties": { <2> + "age": { "type": "integer" }, + "name": { "type": "text" } + } + }, + "employees": { + "type": "nested", + "properties": { <3> + "age": { "type": "integer" }, + "name": { "type": "text" } } } } @@ -58,7 +56,7 @@ PUT my_index/_doc/1 <4> } -------------------------------------------------- // CONSOLE -<1> Properties under the `_doc` mapping type. +<1> Properties in the top-level mappings definition. <2> Properties under the `manager` object field. <3> Properties under the `employees` nested field. <4> An example document which corresponds to the above mapping. 
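The inner fields defined in the `properties` example above are addressed by their full dotted path, and the `nested` field additionally requires a `nested` query wrapper. A minimal sketch against the mapping above (the search value `Alice` is an illustrative placeholder):

[source,js]
--------------------------------------------------
GET my_index/_search
{
  "query": {
    "nested": {
      "path": "employees", <1>
      "query": {
        "match": { "employees.name": "Alice" } <2>
      }
    }
  }
}
--------------------------------------------------
// NOTCONSOLE
<1> `nested` fields must be queried through a `nested` query scoped to their path.
<2> Inner fields are referenced by their full dotted path.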
diff --git a/docs/reference/mapping/params/search-analyzer.asciidoc b/docs/reference/mapping/params/search-analyzer.asciidoc index eb483a3af384f..9b142f58c5960 100644 --- a/docs/reference/mapping/params/search-analyzer.asciidoc +++ b/docs/reference/mapping/params/search-analyzer.asciidoc @@ -14,7 +14,7 @@ this can be overridden with the `search_analyzer` setting: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "settings": { "analysis": { @@ -38,13 +38,11 @@ PUT my_index?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "text": { - "type": "text", - "analyzer": "autocomplete", <2> - "search_analyzer": "standard" <2> - } + "properties": { + "text": { + "type": "text", + "analyzer": "autocomplete", <2> + "search_analyzer": "standard" <2> } } } diff --git a/docs/reference/mapping/params/similarity.asciidoc b/docs/reference/mapping/params/similarity.asciidoc index 9e6e4e0877830..8085adf9110de 100644 --- a/docs/reference/mapping/params/similarity.asciidoc +++ b/docs/reference/mapping/params/similarity.asciidoc @@ -36,18 +36,16 @@ as follows: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "default_field": { <1> - "type": "text" - }, - "boolean_sim_field": { - "type": "text", - "similarity": "boolean" <2> - } + "properties": { + "default_field": { <1> + "type": "text" + }, + "boolean_sim_field": { + "type": "text", + "similarity": "boolean" <2> } } } diff --git a/docs/reference/mapping/params/store.asciidoc b/docs/reference/mapping/params/store.asciidoc index 186666ef5dd73..d3ebe13d4ad62 100644 --- a/docs/reference/mapping/params/store.asciidoc +++ b/docs/reference/mapping/params/store.asciidoc @@ -18,22 +18,20 @@ to extract those fields from a large `_source` field: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "title": { - "type": "text", - "store": true <1> - }, - "date": { - "type": "date", - "store": true <1> - }, - "content": { - "type": "text" - } + "properties": { + "title": { + "type": "text", + "store": true <1> + }, + "date": { + "type": "date", + "store": true <1> + }, + "content": { + "type": "text" } } } diff --git a/docs/reference/mapping/params/term-vector.asciidoc b/docs/reference/mapping/params/term-vector.asciidoc index 491a002f3d612..ff05539522efc 100644 --- a/docs/reference/mapping/params/term-vector.asciidoc +++ b/docs/reference/mapping/params/term-vector.asciidoc @@ -29,15 +29,13 @@ index. [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "text": { - "type": "text", - "term_vector": "with_positions_offsets" - } + "properties": { + "text": { + "type": "text", + "term_vector": "with_positions_offsets" } } } diff --git a/docs/reference/mapping/types/alias.asciidoc b/docs/reference/mapping/types/alias.asciidoc index 75a2b65e5c110..318124d71333e 100644 --- a/docs/reference/mapping/types/alias.asciidoc +++ b/docs/reference/mapping/types/alias.asciidoc @@ -7,21 +7,19 @@ and selected other APIs like <>. 
[source,js] -------------------------------- -PUT trips?include_type_name=true +PUT trips { "mappings": { - "_doc": { - "properties": { - "distance": { - "type": "long" - }, - "route_length_miles": { - "type": "alias", - "path": "distance" // <1> - }, - "transit_mode": { - "type": "keyword" - } + "properties": { + "distance": { + "type": "long" + }, + "route_length_miles": { + "type": "alias", + "path": "distance" // <1> + }, + "transit_mode": { + "type": "keyword" } } } diff --git a/docs/reference/mapping/types/binary.asciidoc b/docs/reference/mapping/types/binary.asciidoc index ebd50802b8d84..22e107dab565d 100644 --- a/docs/reference/mapping/types/binary.asciidoc +++ b/docs/reference/mapping/types/binary.asciidoc @@ -7,17 +7,15 @@ stored by default and is not searchable: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "name": { - "type": "text" - }, - "blob": { - "type": "binary" - } + "properties": { + "name": { + "type": "text" + }, + "blob": { + "type": "binary" } } } diff --git a/docs/reference/mapping/types/boolean.asciidoc b/docs/reference/mapping/types/boolean.asciidoc index 3a92ae23e2fd9..962022060b65b 100644 --- a/docs/reference/mapping/types/boolean.asciidoc +++ b/docs/reference/mapping/types/boolean.asciidoc @@ -17,14 +17,12 @@ For example: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "is_published": { - "type": "boolean" - } + "properties": { + "is_published": { + "type": "boolean" } } } diff --git a/docs/reference/mapping/types/date.asciidoc b/docs/reference/mapping/types/date.asciidoc index 03024d1383945..94aadb46fb2b6 100644 --- a/docs/reference/mapping/types/date.asciidoc +++ b/docs/reference/mapping/types/date.asciidoc @@ -30,14 +30,12 @@ For instance: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "date": { - "type": "date" <1> - } + "properties": { + "date": { + "type": "date" <1> } } } @@ -74,15 +72,13 @@ into a string. [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "date": { - "type": "date", - "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis" - } + "properties": { + "date": { + "type": "date", + "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis" } } } diff --git a/docs/reference/mapping/types/dense-vector.asciidoc b/docs/reference/mapping/types/dense-vector.asciidoc index b12e9e120a479..b97566361a05c 100644 --- a/docs/reference/mapping/types/dense-vector.asciidoc +++ b/docs/reference/mapping/types/dense-vector.asciidoc @@ -17,17 +17,15 @@ You index a dense vector as an array of floats. 
[source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "my_vector": { - "type": "dense_vector" - }, - "my_text" : { - "type" : "keyword" - } + "properties": { + "my_vector": { + "type": "dense_vector" + }, + "my_text" : { + "type" : "keyword" } } } diff --git a/docs/reference/mapping/types/feature-vector.asciidoc b/docs/reference/mapping/types/feature-vector.asciidoc index 25358ab9ec3a6..b4701fc9ab7dd 100644 --- a/docs/reference/mapping/types/feature-vector.asciidoc +++ b/docs/reference/mapping/types/feature-vector.asciidoc @@ -11,14 +11,12 @@ one field to the mappings for each of them. [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "topics": { - "type": "feature_vector" <1> - } + "properties": { + "topics": { + "type": "feature_vector" <1> } } } diff --git a/docs/reference/mapping/types/feature.asciidoc b/docs/reference/mapping/types/feature.asciidoc index 76eada86c59e5..7fe8ff6f935af 100644 --- a/docs/reference/mapping/types/feature.asciidoc +++ b/docs/reference/mapping/types/feature.asciidoc @@ -6,18 +6,16 @@ documents in queries with a <> query. [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "pagerank": { - "type": "feature" <1> - }, - "url_length": { - "type": "feature", - "positive_score_impact": false <2> - } + "properties": { + "pagerank": { + "type": "feature" <1> + }, + "url_length": { + "type": "feature", + "positive_score_impact": false <2> } } } diff --git a/docs/reference/mapping/types/geo-point.asciidoc b/docs/reference/mapping/types/geo-point.asciidoc index 752e611db811f..51e137fbc33b6 100644 --- a/docs/reference/mapping/types/geo-point.asciidoc +++ b/docs/reference/mapping/types/geo-point.asciidoc @@ -15,14 +15,12 @@ There are four ways that a geo-point may be specified, as demonstrated below: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "location": { - "type": "geo_point" - } + "properties": { + "location": { + "type": "geo_point" } } } diff --git a/docs/reference/mapping/types/ip.asciidoc b/docs/reference/mapping/types/ip.asciidoc index 6206df61ac3b6..7f3f5f57d7077 100644 --- a/docs/reference/mapping/types/ip.asciidoc +++ b/docs/reference/mapping/types/ip.asciidoc @@ -6,14 +6,12 @@ https://en.wikipedia.org/wiki/IPv6[IPv6] addresses. 
[source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "ip_addr": { - "type": "ip" - } + "properties": { + "ip_addr": { + "type": "ip" } } } diff --git a/docs/reference/mapping/types/keyword.asciidoc b/docs/reference/mapping/types/keyword.asciidoc index e9e913be3a6d0..8ac0983dc9550 100644 --- a/docs/reference/mapping/types/keyword.asciidoc +++ b/docs/reference/mapping/types/keyword.asciidoc @@ -15,14 +15,12 @@ Below is an example of a mapping for a keyword field: [source,js] -------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "tags": { - "type": "keyword" - } + "properties": { + "tags": { + "type": "keyword" } } } diff --git a/docs/reference/mapping/types/nested.asciidoc b/docs/reference/mapping/types/nested.asciidoc index 4681897575189..f420e680c8590 100644 --- a/docs/reference/mapping/types/nested.asciidoc +++ b/docs/reference/mapping/types/nested.asciidoc @@ -75,14 +75,12 @@ queried independently of the others, with the <>: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "user": { - "type": "nested" <1> - } + "properties": { + "user": { + "type": "nested" <1> } } } diff --git a/docs/reference/mapping/types/numeric.asciidoc b/docs/reference/mapping/types/numeric.asciidoc index a279f9ca24e6a..f2977957ff463 100644 --- a/docs/reference/mapping/types/numeric.asciidoc +++ b/docs/reference/mapping/types/numeric.asciidoc @@ -17,21 +17,19 @@ Below is an example of configuring a mapping with numeric fields: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "number_of_bytes": { - "type": "integer" - }, - "time_in_seconds": { - "type": "float" - }, - "price": { - "type": "scaled_float", - "scaling_factor": 100 - } + "properties": { + "number_of_bytes": { + "type": "integer" + }, + "time_in_seconds": { + "type": "float" + }, + "price": { + "type": "scaled_float", + "scaling_factor": 100 } } } diff --git a/docs/reference/mapping/types/object.asciidoc b/docs/reference/mapping/types/object.asciidoc index 1bf9afa3c3277..f5b9a9df85617 100644 --- a/docs/reference/mapping/types/object.asciidoc +++ b/docs/reference/mapping/types/object.asciidoc @@ -41,22 +41,20 @@ An explicit mapping for the above document could look like this: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { <1> - "properties": { - "region": { - "type": "keyword" - }, - "manager": { <2> - "properties": { - "age": { "type": "integer" }, - "name": { <3> - "properties": { - "first": { "type": "text" }, - "last": { "type": "text" } - } + "properties": { <1> + "region": { + "type": "keyword" + }, + "manager": { <2> + "properties": { + "age": { "type": "integer" }, + "name": { <3> + "properties": { + "first": { "type": "text" }, + "last": { "type": "text" } } } } @@ -66,7 +64,7 @@ PUT my_index?include_type_name=true } -------------------------------------------------- // CONSOLE -<1> The mapping type is a type of object, and has a `properties` field. +<1> Properties in the top-level mappings definition. <2> The `manager` field is an inner `object` field. <3> The `manager.name` field is an inner `object` field within the `manager` field.
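One point worth keeping in mind with the `object` mapping above: internally, Elasticsearch flattens such a hierarchy into a flat list of dotted key/value pairs, which is why inner fields are addressed as `manager.name.first` and so on. A sketch of the flattened form of a matching document (the values are illustrative placeholders):

[source,js]
--------------------------------------------------
{
  "region":             "US",
  "manager.age":        30,
  "manager.name.first": "John",
  "manager.name.last":  "Smith"
}
--------------------------------------------------
// NOTCONSOLE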
diff --git a/docs/reference/mapping/types/parent-join.asciidoc b/docs/reference/mapping/types/parent-join.asciidoc index dacef7c4bc7cb..39bcaa96d7764 100644 --- a/docs/reference/mapping/types/parent-join.asciidoc +++ b/docs/reference/mapping/types/parent-join.asciidoc @@ -9,16 +9,14 @@ A parent/child relation can be defined as follows: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "my_join_field": { <1> - "type": "join", - "relations": { - "question": "answer" <2> - } + "properties": { + "my_join_field": { <1> + "type": "join", + "relations": { + "question": "answer" <2> } } } @@ -319,18 +317,16 @@ make sense to disable eager loading: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "my_join_field": { - "type": "join", - "relations": { - "question": "answer" - }, - "eager_global_ordinals": false - } + "properties": { + "my_join_field": { + "type": "join", + "relations": { + "question": "answer" + }, + "eager_global_ordinals": false } } } @@ -358,16 +354,14 @@ It is also possible to define multiple children for a single parent: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "my_join_field": { - "type": "join", - "relations": { - "question": ["answer", "comment"] <1> - } + "properties": { + "my_join_field": { + "type": "join", + "relations": { + "question": ["answer", "comment"] <1> } } } @@ -388,17 +382,15 @@ Multiple levels of parent/child: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "my_join_field": { - "type": "join", - "relations": { - "question": ["answer", "comment"], <1> - "answer": "vote" <2> - } + "properties": { + "my_join_field": { + "type": "join", + "relations": { + "question": ["answer", "comment"], <1> + "answer": "vote" <2> } } } diff --git a/docs/reference/mapping/types/percolator.asciidoc b/docs/reference/mapping/types/percolator.asciidoc index 7324826eb44ef..0096746d2df35 100644 --- a/docs/reference/mapping/types/percolator.asciidoc +++ b/docs/reference/mapping/types/percolator.asciidoc @@ -15,17 +15,15 @@ If the following mapping configures the `percolator` field type for the [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "query": { - "type": "percolator" - }, - "field": { - "type": "text" - } + "properties": { + "query": { + "type": "percolator" + }, + "field": { + "type": "text" } } } @@ -69,17 +67,15 @@ Lets take a look at the following index with a percolator field type: [source,js] -------------------------------------------------- -PUT index?include_type_name=true +PUT index { "mappings": { - "_doc" : { - "properties": { - "query" : { - "type" : "percolator" - }, - "body" : { - "type": "text" - } + "properties": { + "query" : { + "type" : "percolator" + }, + "body" : { + "type": "text" } } } @@ -117,17 +113,15 @@ to read your queries you need to reindex your queries into a new index on the cu [source,js] -------------------------------------------------- -PUT new_index?include_type_name=true +PUT new_index { "mappings": { - "_doc" : { - "properties": { - "query" : { - "type" : "percolator" - }, - 
"body" : { - "type": "text" - } + "properties": { + "query" : { + "type" : "percolator" + }, + "body" : { + "type": "text" } } } @@ -269,7 +263,7 @@ with these settings and mapping: [source,js] -------------------------------------------------- -PUT /test_index?include_type_name=true +PUT /test_index { "settings": { "analysis": { @@ -282,15 +276,13 @@ PUT /test_index?include_type_name=true } }, "mappings": { - "_doc" : { - "properties": { - "query" : { - "type": "percolator" - }, - "body" : { - "type": "text", - "analyzer": "my_analyzer" <1> - } + "properties": { + "query" : { + "type": "percolator" + }, + "body" : { + "type": "text", + "analyzer": "my_analyzer" <1> } } } @@ -442,7 +434,7 @@ Creating an index with custom analysis settings: [source,js] -------------------------------------------------- -PUT my_queries1?include_type_name=true +PUT my_queries1 { "settings": { "analysis": { @@ -466,19 +458,17 @@ PUT my_queries1?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "query": { - "type": "percolator" - }, - "my_field": { - "type": "text", - "fields": { - "prefix": { <3> - "type": "text", - "analyzer": "wildcard_prefix", - "search_analyzer": "standard" - } + "properties": { + "query": { + "type": "percolator" + }, + "my_field": { + "type": "text", + "fields": { + "prefix": { <3> + "type": "text", + "analyzer": "wildcard_prefix", + "search_analyzer": "standard" } } } @@ -595,7 +585,7 @@ before the `edge_ngram` token filter. [source,js] -------------------------------------------------- -PUT my_queries2?include_type_name=true +PUT my_queries2 { "settings": { "analysis": { @@ -628,19 +618,17 @@ PUT my_queries2?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "query": { - "type": "percolator" - }, - "my_field": { - "type": "text", - "fields": { - "suffix": { - "type": "text", - "analyzer": "wildcard_suffix", - "search_analyzer": "wildcard_suffix_search_time" <1> - } + "properties": { + "query": { + "type": "percolator" + }, + "my_field": { + "type": "text", + "fields": { + "suffix": { + "type": "text", + "analyzer": "wildcard_suffix", + "search_analyzer": "wildcard_suffix_search_time" <1> } } } diff --git a/docs/reference/mapping/types/range.asciidoc b/docs/reference/mapping/types/range.asciidoc index 630458b4866e5..79c9e6629c696 100644 --- a/docs/reference/mapping/types/range.asciidoc +++ b/docs/reference/mapping/types/range.asciidoc @@ -16,21 +16,19 @@ Below is an example of configuring a mapping with various range fields followed [source,js] -------------------------------------------------- -PUT range_index?include_type_name=true +PUT range_index { "settings": { "number_of_shards": 2 }, "mappings": { - "_doc": { - "properties": { - "expected_attendees": { - "type": "integer_range" - }, - "time_frame": { - "type": "date_range", <1> - "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis" - } + "properties": { + "expected_attendees": { + "type": "integer_range" + }, + "time_frame": { + "type": "date_range", <1> + "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_millis" } } } diff --git a/docs/reference/mapping/types/sparse-vector.asciidoc b/docs/reference/mapping/types/sparse-vector.asciidoc index df374e857ba6a..38561789b5d3f 100644 --- a/docs/reference/mapping/types/sparse-vector.asciidoc +++ b/docs/reference/mapping/types/sparse-vector.asciidoc @@ -20,17 +20,15 @@ Dimensions don't need to be in order. 
[source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "my_vector": { - "type": "sparse_vector" - }, - "my_text" : { - "type" : "keyword" - } + "properties": { + "my_vector": { + "type": "sparse_vector" + }, + "my_text" : { + "type" : "keyword" } } } diff --git a/docs/reference/mapping/types/text.asciidoc b/docs/reference/mapping/types/text.asciidoc index 1b3c0a0eaf112..ee972918988ad 100644 --- a/docs/reference/mapping/types/text.asciidoc +++ b/docs/reference/mapping/types/text.asciidoc @@ -17,14 +17,12 @@ Below is an example of a mapping for a text field: [source,js] -------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "full_name": { - "type": "text" - } + "properties": { + "full_name": { + "type": "text" } } } diff --git a/docs/reference/mapping/types/token-count.asciidoc b/docs/reference/mapping/types/token-count.asciidoc index 469583abf1bcd..d574c25e93d19 100644 --- a/docs/reference/mapping/types/token-count.asciidoc +++ b/docs/reference/mapping/types/token-count.asciidoc @@ -9,18 +9,16 @@ For instance: [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "name": { <1> - "type": "text", - "fields": { - "length": { <2> - "type": "token_count", - "analyzer": "standard" - } + "properties": { + "name": { <1> + "type": "text", + "fields": { + "length": { <2> + "type": "token_count", + "analyzer": "standard" } } } diff --git a/docs/reference/ml/transforms.asciidoc b/docs/reference/ml/transforms.asciidoc index 02dd4d5469cfd..a3e7df9fdf27a 100644 --- a/docs/reference/ml/transforms.asciidoc +++ b/docs/reference/ml/transforms.asciidoc @@ -26,51 +26,49 @@ subsequent examples: [source,js] ---------------------------------- -PUT /my_index?include_type_name=true +PUT /my_index { "mappings":{ - "_doc":{ - "properties": { - "@timestamp": { - "type": "date" - }, - "aborted_count": { - "type": "long" - }, - "another_field": { - "type": "keyword" <1> - }, - "clientip": { - "type": "keyword" - }, - "coords": { - "properties": { - "lat": { - "type": "keyword" - }, - "lon": { - "type": "keyword" - } + "properties": { + "@timestamp": { + "type": "date" + }, + "aborted_count": { + "type": "long" + }, + "another_field": { + "type": "keyword" <1> + }, + "clientip": { + "type": "keyword" + }, + "coords": { + "properties": { + "lat": { + "type": "keyword" + }, + "lon": { + "type": "keyword" } - }, - "error_count": { - "type": "long" - }, - "query": { - "type": "keyword" - }, - "some_field": { - "type": "keyword" - }, - "tokenstring1":{ - "type":"keyword" - }, - "tokenstring2":{ - "type":"keyword" - }, - "tokenstring3":{ - "type":"keyword" } + }, + "error_count": { + "type": "long" + }, + "query": { + "type": "keyword" + }, + "some_field": { + "type": "keyword" + }, + "tokenstring1":{ + "type":"keyword" + }, + "tokenstring2":{ + "type":"keyword" + }, + "tokenstring3":{ + "type":"keyword" } } } diff --git a/docs/reference/query-dsl/exists-query.asciidoc b/docs/reference/query-dsl/exists-query.asciidoc index 16828c3751a39..b2e27c76e494d 100644 --- a/docs/reference/query-dsl/exists-query.asciidoc +++ b/docs/reference/query-dsl/exists-query.asciidoc @@ -52,15 +52,13 @@ instance, if the `user` field were mapped as follows: [source,js] -------------------------------------------------- -PUT /example?include_type_name=true +PUT 
/example { "mappings": { - "_doc": { - "properties": { - "user": { - "type": "keyword", - "null_value": "_null_" - } + "properties": { + "user": { + "type": "keyword", + "null_value": "_null_" } } } diff --git a/docs/reference/query-dsl/feature-query.asciidoc b/docs/reference/query-dsl/feature-query.asciidoc index c261133cff412..353c135bf8efd 100644 --- a/docs/reference/query-dsl/feature-query.asciidoc +++ b/docs/reference/query-dsl/feature-query.asciidoc @@ -27,21 +27,19 @@ based or `pagerank`, `url_length` and the `sports` topic. [source,js] -------------------------------------------------- -PUT test?include_type_name=true +PUT test { "mappings": { - "_doc": { - "properties": { - "pagerank": { - "type": "feature" - }, - "url_length": { - "type": "feature", - "positive_score_impact": false - }, - "topics": { - "type": "feature_vector" - } + "properties": { + "pagerank": { + "type": "feature" + }, + "url_length": { + "type": "feature", + "positive_score_impact": false + }, + "topics": { + "type": "feature_vector" } } } diff --git a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc index 8c2b25ea0ee7a..487e944c09e10 100644 --- a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc +++ b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc @@ -6,16 +6,14 @@ bounding box. Assuming the following indexed document: [source,js] -------------------------------------------------- -PUT /my_locations?include_type_name=true +PUT /my_locations { "mappings": { - "_doc": { - "properties": { - "pin": { - "properties": { - "location": { - "type": "geo_point" - } + "properties": { + "pin": { + "properties": { + "location": { + "type": "geo_point" } } } diff --git a/docs/reference/query-dsl/geo-distance-query.asciidoc b/docs/reference/query-dsl/geo-distance-query.asciidoc index da1f99f834d67..da7b0ecfd81e5 100644 --- a/docs/reference/query-dsl/geo-distance-query.asciidoc +++ b/docs/reference/query-dsl/geo-distance-query.asciidoc @@ -7,16 +7,14 @@ document: [source,js] -------------------------------------------------- -PUT /my_locations?include_type_name=true +PUT /my_locations { "mappings": { - "_doc": { - "properties": { - "pin": { - "properties": { - "location": { - "type": "geo_point" - } + "properties": { + "pin": { + "properties": { + "location": { + "type": "geo_point" } } } diff --git a/docs/reference/query-dsl/geo-shape-query.asciidoc b/docs/reference/query-dsl/geo-shape-query.asciidoc index 4121ebfc45edd..424968090d6ab 100644 --- a/docs/reference/query-dsl/geo-shape-query.asciidoc +++ b/docs/reference/query-dsl/geo-shape-query.asciidoc @@ -24,14 +24,12 @@ Given the following index: [source,js] -------------------------------------------------- -PUT /example?include_type_name=true +PUT /example { "mappings": { - "_doc": { - "properties": { - "location": { - "type": "geo_shape" - } + "properties": { + "location": { + "type": "geo_shape" } } } @@ -99,14 +97,12 @@ shape: [source,js] -------------------------------------------------- -PUT /shapes?include_type_name=true +PUT /shapes { "mappings": { - "_doc": { - "properties": { - "location": { - "type": "geo_shape" - } + "properties": { + "location": { + "type": "geo_shape" } } } diff --git a/docs/reference/query-dsl/nested-query.asciidoc b/docs/reference/query-dsl/nested-query.asciidoc index f75348ca976ec..c58d68b73cff1 100644 --- a/docs/reference/query-dsl/nested-query.asciidoc +++ b/docs/reference/query-dsl/nested-query.asciidoc @@ -10,14 +10,12 @@ will work with: [source,js] 
-------------------------------------------------- -PUT /my_index?include_type_name=true +PUT /my_index { "mappings": { - "_doc" : { - "properties" : { - "obj1" : { - "type" : "nested" - } + "properties" : { + "obj1" : { + "type" : "nested" } } } diff --git a/docs/reference/query-dsl/parent-id-query.asciidoc b/docs/reference/query-dsl/parent-id-query.asciidoc index ff306238ddea0..aa2074e7d1b7e 100644 --- a/docs/reference/query-dsl/parent-id-query.asciidoc +++ b/docs/reference/query-dsl/parent-id-query.asciidoc @@ -6,16 +6,14 @@ Given the following mapping definition: [source,js] -------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "my_join_field": { - "type": "join", - "relations": { - "my_parent": "my_child" - } + "properties": { + "my_join_field": { + "type": "join", + "relations": { + "my_parent": "my_child" } } } diff --git a/docs/reference/query-dsl/percolate-query.asciidoc b/docs/reference/query-dsl/percolate-query.asciidoc index 9ae6dd802137c..89264af0f2619 100644 --- a/docs/reference/query-dsl/percolate-query.asciidoc +++ b/docs/reference/query-dsl/percolate-query.asciidoc @@ -13,18 +13,16 @@ Create an index with two fields: [source,js] -------------------------------------------------- -PUT /my-index?include_type_name=true +PUT /my-index { "mappings": { - "_doc": { - "properties": { - "message": { - "type": "text" - }, - "query": { - "type": "percolator" - } - } + "properties": { + "message": { + "type": "text" + }, + "query": { + "type": "percolator" + } } } } diff --git a/docs/reference/query-dsl/term-query.asciidoc b/docs/reference/query-dsl/term-query.asciidoc index 26dd6ee492f21..910123bbe6177 100644 --- a/docs/reference/query-dsl/term-query.asciidoc +++ b/docs/reference/query-dsl/term-query.asciidoc @@ -87,17 +87,15 @@ To demonstrate, try out the example below. First, create an index, specifying t [source,js] -------------------------------------------------- -PUT my_index?include_type_name=true +PUT my_index { "mappings": { - "_doc": { - "properties": { - "full_text": { - "type": "text" <1> - }, - "exact_value": { - "type": "keyword" <2> - } + "properties": { + "full_text": { + "type": "text" <1> + }, + "exact_value": { + "type": "keyword" <2> } } } diff --git a/docs/reference/query-dsl/terms-set-query.asciidoc b/docs/reference/query-dsl/terms-set-query.asciidoc index 5c0e00ce360fa..3ebfb672e205f 100644 --- a/docs/reference/query-dsl/terms-set-query.asciidoc +++ b/docs/reference/query-dsl/terms-set-query.asciidoc @@ -12,14 +12,12 @@ be a number field: [source,js] -------------------------------------------------- -PUT /my-index?include_type_name=true +PUT /my-index { "mappings": { - "_doc": { - "properties": { - "required_matches": { - "type": "long" - } + "properties": { + "required_matches": { + "type": "long" } } } diff --git a/docs/reference/search/request/highlighters-internal.asciidoc b/docs/reference/search/request/highlighters-internal.asciidoc index aa3377bdccf61..11534a01aa234 100644 --- a/docs/reference/search/request/highlighters-internal.asciidoc +++ b/docs/reference/search/request/highlighters-internal.asciidoc @@ -87,15 +87,13 @@ using `english` analyzer, and will be indexed without offsets or term vectors. 
[source,js] -------------------------------------------------- -PUT test_index?include_type_name=true +PUT test_index { "mappings": { - "_doc": { - "properties": { - "content" : { - "type" : "text", - "analyzer" : "english" - } + "properties": { + "content" : { + "type" : "text", + "analyzer" : "english" } } } diff --git a/docs/reference/search/request/post-filter.asciidoc b/docs/reference/search/request/post-filter.asciidoc index b404a30a8250a..c46cdb1e52286 100644 --- a/docs/reference/search/request/post-filter.asciidoc +++ b/docs/reference/search/request/post-filter.asciidoc @@ -9,15 +9,13 @@ Imagine that you are selling shirts that have the following properties: [source,js] -------------------------------------------------- -PUT /shirts?include_type_name=true +PUT /shirts { "mappings": { - "_doc": { - "properties": { - "brand": { "type": "keyword"}, - "color": { "type": "keyword"}, - "model": { "type": "keyword"} - } + "properties": { + "brand": { "type": "keyword"}, + "color": { "type": "keyword"}, + "model": { "type": "keyword"} } } } diff --git a/docs/reference/search/request/sort.asciidoc b/docs/reference/search/request/sort.asciidoc index fe7cf362ea36f..bd8c0d1ad5c27 100644 --- a/docs/reference/search/request/sort.asciidoc +++ b/docs/reference/search/request/sort.asciidoc @@ -9,20 +9,18 @@ Assuming the following index mapping: [source,js] -------------------------------------------------- -PUT /my_index?include_type_name=true +PUT /my_index { "mappings": { - "_doc": { - "properties": { - "post_date": { "type": "date" }, - "user": { - "type": "keyword" - }, - "name": { - "type": "keyword" - }, - "age": { "type": "integer" } - } + "properties": { + "post_date": { "type": "date" }, + "user": { + "type": "keyword" + }, + "name": { + "type": "keyword" + }, + "age": { "type": "integer" } } } } diff --git a/docs/reference/search/suggesters/completion-suggest.asciidoc b/docs/reference/search/suggesters/completion-suggest.asciidoc index 191f01f17394d..c0b527c06e550 100644 --- a/docs/reference/search/suggesters/completion-suggest.asciidoc +++ b/docs/reference/search/suggesters/completion-suggest.asciidoc @@ -24,17 +24,15 @@ which indexes the field values for fast completions. 
[source,js] -------------------------------------------------- -PUT music?include_type_name=true +PUT music { "mappings": { - "_doc" : { - "properties" : { - "suggest" : { - "type" : "completion" - }, - "title" : { - "type": "keyword" - } + "properties" : { + "suggest" : { + "type" : "completion" + }, + "title" : { + "type": "keyword" } } } diff --git a/docs/reference/search/suggesters/context-suggest.asciidoc b/docs/reference/search/suggesters/context-suggest.asciidoc index 1ef8968188ccd..63692f0b06f82 100644 --- a/docs/reference/search/suggesters/context-suggest.asciidoc +++ b/docs/reference/search/suggesters/context-suggest.asciidoc @@ -21,53 +21,49 @@ field: [source,js] -------------------------------------------------- -PUT place?include_type_name=true +PUT place { "mappings": { - "_doc" : { - "properties" : { - "suggest" : { - "type" : "completion", - "contexts": [ - { <1> - "name": "place_type", - "type": "category" - }, - { <2> - "name": "location", - "type": "geo", - "precision": 4 - } - ] - } + "properties" : { + "suggest" : { + "type" : "completion", + "contexts": [ + { <1> + "name": "place_type", + "type": "category" + }, + { <2> + "name": "location", + "type": "geo", + "precision": 4 + } + ] } } } } -PUT place_path_category?include_type_name=true +PUT place_path_category { "mappings": { - "_doc" : { - "properties" : { - "suggest" : { - "type" : "completion", - "contexts": [ - { <3> - "name": "place_type", - "type": "category", - "path": "cat" - }, - { <4> - "name": "location", - "type": "geo", - "precision": 4, - "path": "loc" - } - ] - }, - "loc": { - "type": "geo_point" - } + "properties" : { + "suggest" : { + "type" : "completion", + "contexts": [ + { <3> + "name": "place_type", + "type": "category", + "path": "cat" + }, + { <4> + "name": "location", + "type": "geo", + "precision": 4, + "path": "loc" + } + ] + }, + "loc": { + "type": "geo_point" } } } diff --git a/docs/reference/search/suggesters/phrase-suggest.asciidoc b/docs/reference/search/suggesters/phrase-suggest.asciidoc index 08a7aabd1c5de..9c2c56cc40fec 100644 --- a/docs/reference/search/suggesters/phrase-suggest.asciidoc +++ b/docs/reference/search/suggesters/phrase-suggest.asciidoc @@ -23,7 +23,7 @@ work. The `reverse` analyzer is used only in the last example. 
[source,js] -------------------------------------------------- -PUT test?include_type_name=true +PUT test { "settings": { "index": { @@ -52,19 +52,17 @@ PUT test?include_type_name=true } }, "mappings": { - "_doc": { - "properties": { - "title": { - "type": "text", - "fields": { - "trigram": { - "type": "text", - "analyzer": "trigram" - }, - "reverse": { - "type": "text", - "analyzer": "reverse" - } + "properties": { + "title": { + "type": "text", + "fields": { + "trigram": { + "type": "text", + "analyzer": "trigram" + }, + "reverse": { + "type": "text", + "analyzer": "reverse" } } } diff --git a/x-pack/docs/en/security/authorization/alias-privileges.asciidoc b/x-pack/docs/en/security/authorization/alias-privileges.asciidoc index aca6fc27b1dd8..b9b6d44fd69b7 100644 --- a/x-pack/docs/en/security/authorization/alias-privileges.asciidoc +++ b/x-pack/docs/en/security/authorization/alias-privileges.asciidoc @@ -54,7 +54,7 @@ added to an index directly as part of the index creation: [source,shell] ------------------------------------------------------------------------------- -PUT /2015?include_type_name=true +PUT /2015 { "aliases" : { "current_year" : {} From 3f2723366e4c63da63e400a60211a6c7550ebf1b Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Tue, 22 Jan 2019 17:16:03 +0200 Subject: [PATCH 12/39] Mute failing test Tracking #37708 --- .../java/org/elasticsearch/threadpool/EvilThreadPoolTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/threadpool/EvilThreadPoolTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/threadpool/EvilThreadPoolTests.java index 64cec9224965b..d118df9abde47 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/threadpool/EvilThreadPoolTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/threadpool/EvilThreadPoolTests.java @@ -219,6 +219,7 @@ public void testExecutionExceptionOnAutoQueueFixedESThreadPoolExecutor() throws } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37708") public void testExecutionExceptionOnSinglePrioritizingThreadPoolExecutor() throws InterruptedException { final PrioritizedEsThreadPoolExecutor prioritizedExecutor = EsExecutors.newSinglePrioritizing("test", EsExecutors.daemonThreadFactory("test"), threadPool.getThreadContext(), threadPool.scheduler()); From 5c1a1f7ac1740fc902cc77ad47e7a2441b5536b4 Mon Sep 17 00:00:00 2001 From: Ioannis Kakavas Date: Tue, 22 Jan 2019 17:26:36 +0200 Subject: [PATCH 13/39] Use PEM files for PkiOptionalClientAuthTests (#37683) Use PEM files for the key/cert for TLS on the http layer of the node instead of a JKS keystore so that the tests can also run in a FIPS 140 JVM.
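In configuration terms, the swap is from a JKS keystore to a PEM key/certificate pair. A minimal sketch of the new shape of these settings (the paths are placeholders; the actual test values appear in the diff below):

[source,java]
--------------------------------------------------
import org.elasticsearch.common.settings.Settings;

public class PemTlsSettingsSketch {
    public static void main(String[] args) {
        // Before (JKS keystore, not usable on a FIPS 140 JVM):
        //   xpack.security.http.ssl.keystore.path + xpack.security.http.ssl.keystore.password
        // After (PEM): separate key and certificate files; the key passphrase is
        // supplied via the secure setting xpack.security.http.ssl.secure_key_passphrase.
        Settings settings = Settings.builder()
            .put("xpack.security.http.ssl.enabled", true)
            .put("xpack.security.http.ssl.key", "/path/to/testnode.pem")         // placeholder path
            .put("xpack.security.http.ssl.certificate", "/path/to/testnode.crt") // placeholder path
            .build();
        System.out.println(settings.keySet());
    }
}
--------------------------------------------------

Keeping the key and certificate as separate PEM files sidesteps the JKS keystore type, which is what blocks these tests on a FIPS 140 JVM.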
Resolves: #37682 --- .../authc/pki/PkiOptionalClientAuthTests.java | 39 ++++++++++--------- 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiOptionalClientAuthTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiOptionalClientAuthTests.java index f2a6212107307..3dd749c86705a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiOptionalClientAuthTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/pki/PkiOptionalClientAuthTests.java @@ -49,27 +49,30 @@ protected Settings nodeSettings() { String randomClientPortRange = randomClientPort + "-" + (randomClientPort+100); Settings.Builder builder = Settings.builder() - .put(super.nodeSettings()) - .put("xpack.security.http.ssl.enabled", true) - .put("xpack.security.http.ssl.client_authentication", SSLClientAuth.OPTIONAL) - .put("xpack.security.http.ssl.keystore.path", - getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks")) - .put("xpack.security.http.ssl.keystore.password", "testnode") - .put("xpack.security.authc.realms.file.file.order", "0") - .put("xpack.security.authc.realms.pki.pki1.order", "1") - .put("xpack.security.authc.realms.pki.pki1.truststore.path", - getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/truststore-testnode-only.jks")) - .put("xpack.security.authc.realms.pki.pki1.files.role_mapping", getDataPath("role_mapping.yml")) - .put("transport.profiles.want_client_auth.port", randomClientPortRange) - .put("transport.profiles.want_client_auth.bind_host", "localhost") - .put("transport.profiles.want_client_auth.xpack.security.ssl.keystore.path", - getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks")) - .put("transport.profiles.want_client_auth.xpack.security.ssl.keystore.password", "testnode") - .put("transport.profiles.want_client_auth.xpack.security.ssl.client_authentication", SSLClientAuth.OPTIONAL); + .put(super.nodeSettings()) + .put("xpack.security.http.ssl.enabled", true) + .put("xpack.security.http.ssl.client_authentication", SSLClientAuth.OPTIONAL) + .put("xpack.security.http.ssl.key", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem")) + .put("xpack.security.http.ssl.certificate", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) + .put("xpack.security.authc.realms.file.file.order", "0") + .put("xpack.security.authc.realms.pki.pki1.order", "1") + .put("xpack.security.authc.realms.pki.pki1.truststore.path", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/truststore-testnode-only.jks")) + .put("xpack.security.authc.realms.pki.pki1.files.role_mapping", getDataPath("role_mapping.yml")) + .put("transport.profiles.want_client_auth.port", randomClientPortRange) + .put("transport.profiles.want_client_auth.bind_host", "localhost") + .put("transport.profiles.want_client_auth.xpack.security.ssl.key", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem")) + .put("transport.profiles.want_client_auth.xpack.security.ssl.certificate", + getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt")) + .put("transport.profiles.want_client_auth.xpack.security.ssl.client_authentication", SSLClientAuth.OPTIONAL); 
SecuritySettingsSource.addSecureSettings(builder, secureSettings -> { secureSettings.setString("xpack.security.authc.realms.pki.pki1.truststore.secure_password", "truststore-testnode-only"); - secureSettings.setString("xpack.security.http.ssl.keystore.secure_password", "testnode"); + secureSettings.setString("xpack.security.http.ssl.secure_key_passphrase", "testnode"); + secureSettings.setString("transport.profiles.want_client_auth.xpack.security.ssl.secure_key_passphrase", "testnode"); }); return builder.build(); From 256e01ca92c43f3ccdc87bfc95031cbcda82e04a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Tue, 22 Jan 2019 17:34:13 +0100 Subject: [PATCH 14/39] Fix potential NPE in UsersTool (#37660) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It looks like the output of FileUserPasswdStore.parseFile shouldn't be wrapped into another map since its output can be null. Doing this wrapping after the null check (which potentially raises an exception) instead. --- .../xpack/security/authc/file/tool/UsersTool.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/tool/UsersTool.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/tool/UsersTool.java index 9d4dfba327e5d..6007ef5fd6d03 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/tool/UsersTool.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/file/tool/UsersTool.java @@ -7,6 +7,7 @@ import joptsimple.OptionSet; import joptsimple.OptionSpec; + import org.elasticsearch.cli.EnvironmentAwareCommand; import org.elasticsearch.cli.ExitCodes; import org.elasticsearch.cli.LoggingAwareMultiCommand; @@ -221,7 +222,7 @@ protected void execute(Terminal terminal, OptionSet options, Environment env) th Path file = FileUserPasswdStore.resolveFile(env); FileAttributesChecker attributesChecker = new FileAttributesChecker(file); - Map users = new HashMap<>(FileUserPasswdStore.parseFile(file, null, env.settings())); + Map users = FileUserPasswdStore.parseFile(file, null, env.settings()); if (users == null) { throw new UserException(ExitCodes.CONFIG, "Configuration file [" + file + "] is missing"); } @@ -229,6 +230,7 @@ protected void execute(Terminal terminal, OptionSet options, Environment env) th throw new UserException(ExitCodes.NO_USER, "User [" + username + "] doesn't exist"); } final Hasher hasher = Hasher.resolve(XPackSettings.PASSWORD_HASHING_ALGORITHM.get(env.settings())); + users = new HashMap<>(users); // make modifiable users.put(username, hasher.hash(new SecureString(password))); FileUserPasswdStore.writeFile(users, file); From 715719ee3b5ee29edd008571d1ce3bf975846a25 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 22 Jan 2019 12:29:24 -0500 Subject: [PATCH 15/39] Remove warn-date from warning headers (#37622) This commit removes the warn-date from warning headers. Previously we were stamping every warning header with when the request occurred. However, this has a severe performance penalty when deprecation logging is called frequently, as obtaining the current time and formatting it properly is expensive. A previous change moved to using the startup time as the time to stamp on every warning header, but this was only to prove that the timestamping was expensive. Since the warn-date is optional, we elect to remove it from the warning header. 
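To make the format change concrete: a warning header used to end with a quoted warn-date, and now carries only the quoted warning value. A minimal sketch of parsing both variants (the header strings are invented for the example, and the pattern is a deliberately simplified stand-in for the stricter production pattern in the diff below):

[source,java]
--------------------------------------------------
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class WarningHeaderSketch {
    // Simplified pattern: the trailing quoted warn-date group is optional, so
    // headers from older nodes (which still send a warn-date) and newer nodes
    // (which omit it) both match.
    private static final Pattern WARNING = Pattern.compile("299 \\S+ \"([^\"]*)\"( \"[^\"]*\")?");

    public static void main(String[] args) {
        String oldStyle = "299 Elasticsearch-6.6.0 \"a deprecation message\" \"Sat, 25 Feb 2017 10:27:43 GMT\"";
        String newStyle = "299 Elasticsearch-7.0.0 \"a deprecation message\"";
        for (String header : new String[] { oldStyle, newStyle }) {
            Matcher m = WARNING.matcher(header);
            if (m.matches()) {
                System.out.println(m.group(1)); // prints the warning value in both cases
            }
        }
    }
}
--------------------------------------------------

With the date gone, the warning value is the final quoted segment of the header, which is what lets the extraction code below drop the old penultimate-quote bookkeeping.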
Prior to this commit, we worked in Kibana to make the warn-date treated as optional there so that we can follow-up in Elasticsearch and remove the warn-date. This commit does that. --- .../org/elasticsearch/client/Response.java | 4 +- .../common/logging/DeprecationLogger.java | 105 ++---------------- 2 files changed, 14 insertions(+), 95 deletions(-) diff --git a/client/rest/src/main/java/org/elasticsearch/client/Response.java b/client/rest/src/main/java/org/elasticsearch/client/Response.java index 4115ef36a5cb4..e267a68bcbd91 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/Response.java +++ b/client/rest/src/main/java/org/elasticsearch/client/Response.java @@ -103,7 +103,7 @@ public HttpEntity getEntity() { private static final Pattern WARNING_HEADER_PATTERN = Pattern.compile( "299 " + // warn code "Elasticsearch-\\d+\\.\\d+\\.\\d+(?:-(?:alpha|beta|rc)\\d+)?(?:-SNAPSHOT)?-(?:[a-f0-9]{7}|Unknown) " + // warn agent - "\"((?:\t| |!|[\\x23-\\x5B]|[\\x5D-\\x7E]|[\\x80-\\xFF]|\\\\|\\\\\")*)\" " + // quoted warning value, captured + "\"((?:\t| |!|[\\x23-\\x5B]|[\\x5D-\\x7E]|[\\x80-\\xFF]|\\\\|\\\\\")*)\"( " + // quoted warning value, captured // quoted RFC 1123 date format "\"" + // opening quote "(?:Mon|Tue|Wed|Thu|Fri|Sat|Sun), " + // weekday @@ -112,7 +112,7 @@ public HttpEntity getEntity() { "\\d{4} " + // 4-digit year "\\d{2}:\\d{2}:\\d{2} " + // (two-digit hour):(two-digit minute):(two-digit second) "GMT" + // GMT - "\""); // closing quote + "\")?"); // closing quote (optional, since an older version can still send a warn-date) /** * Returns a list of all warning headers returned in the response. diff --git a/server/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java b/server/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java index 1eb3d52b46cde..0c77271c7ed0f 100644 --- a/server/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java +++ b/server/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java @@ -29,14 +29,8 @@ import java.nio.charset.Charset; import java.security.AccessController; import java.security.PrivilegedAction; -import java.time.ZoneId; -import java.time.ZonedDateTime; -import java.time.format.DateTimeFormatter; -import java.time.format.DateTimeFormatterBuilder; -import java.time.format.SignStyle; import java.util.BitSet; import java.util.Collections; -import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.Locale; @@ -47,14 +41,6 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import static java.time.temporal.ChronoField.DAY_OF_MONTH; -import static java.time.temporal.ChronoField.DAY_OF_WEEK; -import static java.time.temporal.ChronoField.HOUR_OF_DAY; -import static java.time.temporal.ChronoField.MINUTE_OF_HOUR; -import static java.time.temporal.ChronoField.MONTH_OF_YEAR; -import static java.time.temporal.ChronoField.SECOND_OF_MINUTE; -import static java.time.temporal.ChronoField.YEAR; - /** * A logger that logs deprecation notices. */ @@ -165,73 +151,14 @@ public void deprecatedAndMaybeLog(final String key, final String msg, final Obje Build.CURRENT.isSnapshot() ? "-SNAPSHOT" : "", Build.CURRENT.shortHash()); - /* - * RFC 7234 section 5.5 specifies that the warn-date is a quoted HTTP-date. HTTP-date is defined in RFC 7234 Appendix B as being from - * RFC 7231 section 7.1.1.1. RFC 7231 specifies an HTTP-date as an IMF-fixdate (or an obs-date referring to obsolete formats). 
The - * grammar for IMF-fixdate is specified as 'day-name "," SP date1 SP time-of-day SP GMT'. Here, day-name is - * (Mon|Tue|Wed|Thu|Fri|Sat|Sun). Then, date1 is 'day SP month SP year' where day is 2DIGIT, month is - * (Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec), and year is 4DIGIT. Lastly, time-of-day is 'hour ":" minute ":" second' where - * hour is 2DIGIT, minute is 2DIGIT, and second is 2DIGIT. Finally, 2DIGIT and 4DIGIT have the obvious definitions. - */ - private static final DateTimeFormatter RFC_7231_DATE_TIME; - - static { - final Map dow = new HashMap<>(); - dow.put(1L, "Mon"); - dow.put(2L, "Tue"); - dow.put(3L, "Wed"); - dow.put(4L, "Thu"); - dow.put(5L, "Fri"); - dow.put(6L, "Sat"); - dow.put(7L, "Sun"); - final Map moy = new HashMap<>(); - moy.put(1L, "Jan"); - moy.put(2L, "Feb"); - moy.put(3L, "Mar"); - moy.put(4L, "Apr"); - moy.put(5L, "May"); - moy.put(6L, "Jun"); - moy.put(7L, "Jul"); - moy.put(8L, "Aug"); - moy.put(9L, "Sep"); - moy.put(10L, "Oct"); - moy.put(11L, "Nov"); - moy.put(12L, "Dec"); - RFC_7231_DATE_TIME = new DateTimeFormatterBuilder() - .parseCaseInsensitive() - .parseLenient() - .optionalStart() - .appendText(DAY_OF_WEEK, dow) - .appendLiteral(", ") - .optionalEnd() - .appendValue(DAY_OF_MONTH, 2, 2, SignStyle.NOT_NEGATIVE) - .appendLiteral(' ') - .appendText(MONTH_OF_YEAR, moy) - .appendLiteral(' ') - .appendValue(YEAR, 4) - .appendLiteral(' ') - .appendValue(HOUR_OF_DAY, 2) - .appendLiteral(':') - .appendValue(MINUTE_OF_HOUR, 2) - .optionalStart() - .appendLiteral(':') - .appendValue(SECOND_OF_MINUTE, 2) - .optionalEnd() - .appendLiteral(' ') - .appendOffset("+HHMM", "GMT") - .toFormatter(Locale.getDefault(Locale.Category.FORMAT)); - } - - private static final String STARTUP_TIME = RFC_7231_DATE_TIME.format(ZonedDateTime.now(ZoneId.of("GMT"))); - /** * Regular expression to test if a string matches the RFC7234 specification for warning headers. This pattern assumes that the warn code * is always 299. Further, this pattern assumes that the warn agent represents a version of Elasticsearch including the build hash. */ - public static Pattern WARNING_HEADER_PATTERN = Pattern.compile( + public static final Pattern WARNING_HEADER_PATTERN = Pattern.compile( "299 " + // warn code "Elasticsearch-\\d+\\.\\d+\\.\\d+(?:-(?:alpha|beta|rc)\\d+)?(?:-SNAPSHOT)?-(?:[a-f0-9]{7}|Unknown) " + // warn agent - "\"((?:\t| |!|[\\x23-\\x5B]|[\\x5D-\\x7E]|[\\x80-\\xFF]|\\\\|\\\\\")*)\" " + // quoted warning value, captured + "\"((?:\t| |!|[\\x23-\\x5B]|[\\x5D-\\x7E]|[\\x80-\\xFF]|\\\\|\\\\\")*)\"( " + // quoted warning value, captured // quoted RFC 1123 date format "\"" + // opening quote "(?:Mon|Tue|Wed|Thu|Fri|Sat|Sun), " + // weekday @@ -240,12 +167,11 @@ public void deprecatedAndMaybeLog(final String key, final String msg, final Obje "\\d{4} " + // 4-digit year "\\d{2}:\\d{2}:\\d{2} " + // (two-digit hour):(two-digit minute):(two-digit second) "GMT" + // GMT - "\""); // closing quote + "\")?"); // closing quote (optional, since an older version can still send a warn-date) /** * Extracts the warning value from the value of a warning header that is formatted according to RFC 7234. That is, given a string - * {@code 299 Elasticsearch-6.0.0 "warning value" "Sat, 25 Feb 2017 10:27:43 GMT"}, the return value of this method would be {@code - * warning value}. + * {@code 299 Elasticsearch-6.0.0 "warning value"}, the return value of this method would be {@code warning value}. * * @param s the value of a warning header formatted according to RFC 7234. 
* @return the extracted warning value @@ -253,23 +179,19 @@ public void deprecatedAndMaybeLog(final String key, final String msg, final Obje public static String extractWarningValueFromWarningHeader(final String s) { /* * We know the exact format of the warning header, so to extract the warning value we can skip forward from the front to the first - * quote, and skip backwards from the end to the penultimate quote: + * quote and we know the last quote is at the end of the string * - * 299 Elasticsearch-6.0.0 "warning value" "Sat, 25, Feb 2017 10:27:43 GMT" - * ^ ^ ^ - * firstQuote penultimateQuote lastQuote - * - * We do it this way rather than seeking forward after the first quote because there could be escaped quotes in the warning value - * but since there are none in the warning date, we can skip backwards to find the quote that closes the quoted warning value. + * 299 Elasticsearch-6.0.0 "warning value" + * ^ ^ + * firstQuote lastQuote * * We parse this manually rather than using the capturing regular expression because the regular expression involves a lot of * backtracking and carries a performance penalty. However, when assertions are enabled, we still use the regular expression to * verify that we are maintaining the warning header format. */ final int firstQuote = s.indexOf('\"'); - final int lastQuote = s.lastIndexOf('\"'); - final int penultimateQuote = s.lastIndexOf('\"', lastQuote - 1); - final String warningValue = s.substring(firstQuote + 1, penultimateQuote - 2); + final int lastQuote = s.length() - 1; + final String warningValue = s.substring(firstQuote + 1, lastQuote); assert assertWarningValue(s, warningValue); return warningValue; } @@ -299,7 +221,6 @@ void deprecated(final Set threadContexts, final String message, f deprecated(threadContexts, message, true, params); } - void deprecated(final Set threadContexts, final String message, final boolean log, final Object... params) { final Iterator iterator = threadContexts.iterator(); @@ -338,9 +259,7 @@ public Void run() { * @return a warning value formatted according to RFC 7234 */ public static String formatWarning(final String s) { - return WARNING_PREFIX + " " - + "\"" + escapeAndEncode(s) + "\"" + " " - + "\"" + STARTUP_TIME + "\""; + return WARNING_PREFIX + " " + "\"" + escapeAndEncode(s) + "\""; } /** @@ -451,7 +370,7 @@ static String encode(final String s) { int startIndex = i; do { i++; - } while (i < s.length() && !doesNotNeedEncoding.get(s.charAt(i))); + } while (i < s.length() && doesNotNeedEncoding.get(s.charAt(i)) == false); final byte[] bytes = s.substring(startIndex, i).getBytes(UTF_8); // noinspection ForLoopReplaceableByForEach From 2ba9e361aba7df6e9c0b5fa8fae3d219d50ddf09 Mon Sep 17 00:00:00 2001 From: Zachary Tong Date: Tue, 22 Jan 2019 12:38:55 -0500 Subject: [PATCH 16/39] Add helper classes to determine if aggs have a value (#36020) This adds a set of helper classes to determine if an agg "has a value". This is needed because InternalAggs represent "empty" in different manners according to convention. Some use `NaN`, `+/- Inf`, `0.0`, etc. A user can pass the Internal agg type to one of these helper methods and it will report if the agg contains a value or not, which allows the user to differentiate "empty" from a real `NaN`. These helpers are best-effort in some cases. For example, several pipeline aggs share a single return class but use different conventions to mark "empty", so the helper uses the loosest definition that applies to all the aggs that use the class. Sums in particular are unreliable. 
The InternalSum simply returns 0.0 if the agg is empty (which is correct, no values == sum of zero). But this also means the helper cannot differentiate between "empty" and `+1 + -1`. --- .../MatrixAggregationInspectionHelper.java | 29 +++ .../stats/MatrixStatsAggregatorTests.java | 5 +- .../JoinAggregationInspectionHelper.java | 34 +++ .../ChildrenToParentAggregatorTests.java | 3 + .../ParentToChildrenAggregatorTests.java | 1 + .../bucket/composite/InternalComposite.java | 2 +- .../bucket/filter/InternalFilter.java | 2 +- .../bucket/geogrid/GeoGridBucket.java | 2 +- .../bucket/sampler/InternalSampler.java | 2 +- .../AbstractInternalHDRPercentiles.java | 4 + .../AbstractInternalTDigestPercentiles.java | 4 + .../metrics/InternalCardinality.java | 4 + .../InternalMedianAbsoluteDeviation.java | 2 +- .../metrics/InternalScriptedMetric.java | 4 + .../metrics/MetricInspectionHelper.java | 78 ++++++ .../BucketScriptPipelineAggregator.java | 5 +- .../CumulativeSumPipelineAggregator.java | 3 +- .../pipeline/MovAvgPipelineAggregator.java | 14 +- .../support/AggregationInspectionHelper.java | 242 ++++++++++++++++++ .../bucket/filter/FilterAggregatorTests.java | 7 + .../bucket/filter/FiltersAggregatorTests.java | 11 + .../geogrid/GeoHashGridAggregatorTests.java | 4 + .../AutoDateHistogramAggregatorTests.java | 53 +++- .../DateHistogramAggregatorTests.java | 45 +++- .../histogram/HistogramAggregatorTests.java | 22 +- .../missing/MissingAggregatorTests.java | 13 +- .../bucket/nested/NestedAggregatorTests.java | 31 ++- .../sampler/SamplerAggregatorTests.java | 4 +- .../SignificantTextAggregatorTests.java | 14 +- .../bucket/terms/TermsAggregatorTests.java | 12 + .../metrics/AvgAggregatorTests.java | 10 +- .../metrics/CardinalityAggregatorTests.java | 7 + .../metrics/ExtendedStatsAggregatorTests.java | 4 + .../metrics/GeoBoundsAggregatorTests.java | 3 + .../metrics/GeoCentroidAggregatorTests.java | 4 + .../HDRPercentileRanksAggregatorTests.java | 3 + .../HDRPercentilesAggregatorTests.java | 7 + .../metrics/MaxAggregatorTests.java | 7 + ...edianAbsoluteDeviationAggregatorTests.java | 31 ++- .../metrics/MinAggregatorTests.java | 5 + .../metrics/StatsAggregatorTests.java | 4 + .../metrics/SumAggregatorTests.java | 37 ++- ...TDigestPercentileRanksAggregatorTests.java | 3 + .../TDigestPercentilesAggregatorTests.java | 7 + .../metrics/TopHitsAggregatorTests.java | 7 + .../metrics/ValueCountAggregatorTests.java | 40 ++- .../metrics/WeightedAvgAggregatorTests.java | 13 + .../CumulativeSumAggregatorTests.java | 25 ++ .../extractor/TestMultiValueAggregation.java | 2 +- 49 files changed, 777 insertions(+), 98 deletions(-) create mode 100644 modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixAggregationInspectionHelper.java create mode 100644 modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/JoinAggregationInspectionHelper.java create mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/metrics/MetricInspectionHelper.java create mode 100644 server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationInspectionHelper.java diff --git a/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixAggregationInspectionHelper.java b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixAggregationInspectionHelper.java new file mode 100644 index 0000000000000..c62c91477cdef --- /dev/null +++
b/modules/aggs-matrix-stats/src/main/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixAggregationInspectionHelper.java @@ -0,0 +1,29 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.search.aggregations.matrix.stats; + +/** + * Counterpart to {@link org.elasticsearch.search.aggregations.support.AggregationInspectionHelper}, providing + * helpers for some aggs in the MatrixStats package + */ +public class MatrixAggregationInspectionHelper { + public static boolean hasValue(InternalMatrixStats agg) { + return agg.getResults() != null; + } +} diff --git a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorTests.java b/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorTests.java index aa778e6f704f9..0512f3d5db3b6 100644 --- a/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorTests.java +++ b/modules/aggs-matrix-stats/src/test/java/org/elasticsearch/search/aggregations/matrix/stats/MatrixStatsAggregatorTests.java @@ -53,10 +53,12 @@ public void testNoData() throws Exception { .fields(Collections.singletonList("field")); InternalMatrixStats stats = search(searcher, new MatchAllDocsQuery(), aggBuilder, ft); assertNull(stats.getStats()); + assertFalse(MatrixAggregationInspectionHelper.hasValue(stats)); } } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37587") public void testTwoFields() throws Exception { String fieldA = "a"; MappedFieldType ftA = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.DOUBLE); @@ -87,8 +89,9 @@ public void testTwoFields() throws Exception { IndexSearcher searcher = new IndexSearcher(reader); MatrixStatsAggregationBuilder aggBuilder = new MatrixStatsAggregationBuilder("my_agg") .fields(Arrays.asList(fieldA, fieldB)); - InternalMatrixStats stats = search(searcher, new MatchAllDocsQuery(), aggBuilder, ftA, ftB); + InternalMatrixStats stats = searchAndReduce(searcher, new MatchAllDocsQuery(), aggBuilder, ftA, ftB); multiPassStats.assertNearlyEqual(new MatrixStatsResults(stats.getStats())); + assertTrue(MatrixAggregationInspectionHelper.hasValue(stats)); } } } diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/JoinAggregationInspectionHelper.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/JoinAggregationInspectionHelper.java new file mode 100644 index 0000000000000..08d137f4fb90d --- /dev/null +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/JoinAggregationInspectionHelper.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.join.aggregations; + +/** + * Counterpart to {@link org.elasticsearch.search.aggregations.support.AggregationInspectionHelper}, providing + * helpers for some aggs in the Join package + */ +public class JoinAggregationInspectionHelper { + + public static boolean hasValue(InternalParent agg) { + return agg.getDocCount() > 0; + } + + public static boolean hasValue(InternalChildren agg) { + return agg.getDocCount() > 0; + } +} diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenToParentAggregatorTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenToParentAggregatorTests.java index 685c872fa72d4..c8c7200df6114 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenToParentAggregatorTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ChildrenToParentAggregatorTests.java @@ -90,6 +90,7 @@ public void testNoDocs() throws IOException { assertEquals(0, childrenToParent.getDocCount()); assertNotNull("Aggregations: " + childrenToParent.getAggregations().asMap(), parentAggregation); assertEquals(Double.POSITIVE_INFINITY, ((InternalMin) parentAggregation).getValue(), Double.MIN_VALUE); + assertFalse(JoinAggregationInspectionHelper.hasValue(childrenToParent)); }); indexReader.close(); directory.close(); @@ -119,6 +120,7 @@ public void testParentChild() throws IOException { parent.getAggregations().asMap(), expectedTotalParents, parent.getDocCount()); assertEquals(expectedMinValue, ((InternalMin) parent.getAggregations().get("in_parent")).getValue(), Double.MIN_VALUE); + assertTrue(JoinAggregationInspectionHelper.hasValue(parent)); }); // verify for each children @@ -170,6 +172,7 @@ public void testParentChildTerms() throws IOException { // verify a terms-aggregation inside the parent-aggregation testCaseTerms(new MatchAllDocsQuery(), indexSearcher, parent -> { assertNotNull(parent); + assertTrue(JoinAggregationInspectionHelper.hasValue(parent)); LongTerms valueTerms = parent.getAggregations().get("value_terms"); assertNotNull(valueTerms); diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregatorTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregatorTests.java index 452fe1b490b02..9023f3f0485ba 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregatorTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/aggregations/ParentToChildrenAggregatorTests.java @@ -105,6 +105,7 @@ public void testParentChild() throws IOException { expectedMinValue = Math.min(expectedMinValue, expectedValues.v2()); } assertEquals(expectedTotalChildren, child.getDocCount()); + 
assertTrue(JoinAggregationInspectionHelper.hasValue(child)); assertEquals(expectedMinValue, ((InternalMin) child.getAggregations().get("in_child")).getValue(), Double.MIN_VALUE); }); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java index 26fcfed9a262f..822fdd7ba31a5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java @@ -226,7 +226,7 @@ InternalBucket next() { } } - static class InternalBucket extends InternalMultiBucketAggregation.InternalBucket + public static class InternalBucket extends InternalMultiBucketAggregation.InternalBucket implements CompositeAggregation.Bucket, KeyComparable { private final CompositeKey key; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilter.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilter.java index 5ee89ee05eef0..f5127c31bce65 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilter.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilter.java @@ -50,4 +50,4 @@ public String getWriteableName() { protected InternalSingleBucketAggregation newAggregation(String name, long docCount, InternalAggregations subAggregations) { return new InternalFilter(name, docCount, subAggregations, pipelineAggregators(), getMetaData()); } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridBucket.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridBucket.java index 8246d629bd527..850bc6e7bafcb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridBucket.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridBucket.java @@ -34,7 +34,7 @@ import java.util.List; import java.util.Objects; -class GeoGridBucket extends InternalMultiBucketAggregation.InternalBucket implements GeoHashGrid.Bucket, Comparable { +public class GeoGridBucket extends InternalMultiBucketAggregation.InternalBucket implements GeoHashGrid.Bucket, Comparable { protected long geohashAsLong; protected long docCount; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/InternalSampler.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/InternalSampler.java index 1a04133e82bfc..da72fec8511be 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/InternalSampler.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/InternalSampler.java @@ -59,4 +59,4 @@ protected InternalSingleBucketAggregation newAggregation(String name, long docCo InternalAggregations subAggregations) { return new InternalSampler(name, docCount, subAggregations, pipelineAggregators(), metaData); } -} \ No newline at end of file +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalHDRPercentiles.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalHDRPercentiles.java index 7050254f2793f..b3bbe6b911c48 100644 --- 
a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalHDRPercentiles.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalHDRPercentiles.java @@ -98,6 +98,10 @@ public long getEstimatedMemoryFootprint() { return state.getEstimatedFootprintInBytes(); } + DoubleHistogram getState() { + return state; + } + @Override public AbstractInternalHDRPercentiles doReduce(List aggregations, ReduceContext reduceContext) { DoubleHistogram merged = null; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalTDigestPercentiles.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalTDigestPercentiles.java index 6e6ff3cf3a88b..cc63e5f7a4325 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalTDigestPercentiles.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalTDigestPercentiles.java @@ -82,6 +82,10 @@ public long getEstimatedMemoryFootprint() { return state.byteSize(); } + TDigestState getState() { + return state; + } + @Override public AbstractInternalTDigestPercentiles doReduce(List aggregations, ReduceContext reduceContext) { TDigestState merged = null; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalCardinality.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalCardinality.java index b3fcb33a4fb84..144777379b5ad 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalCardinality.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalCardinality.java @@ -79,6 +79,10 @@ public long getValue() { return counts == null ? 
0 : counts.cardinality(0); } + HyperLogLogPlusPlus getCounts() { + return counts; + } + @Override public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) { InternalCardinality reduced = null; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalMedianAbsoluteDeviation.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalMedianAbsoluteDeviation.java index 01ab3a323e12d..ace9edb13f515 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalMedianAbsoluteDeviation.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalMedianAbsoluteDeviation.java @@ -121,7 +121,7 @@ public String getWriteableName() { return MedianAbsoluteDeviationAggregationBuilder.NAME; } - public TDigestState getValuesSketch() { + TDigestState getValuesSketch() { return valuesSketch; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalScriptedMetric.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalScriptedMetric.java index ec2419e03ab3c..a0bb5e74fd5cb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalScriptedMetric.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalScriptedMetric.java @@ -80,6 +80,10 @@ public Object aggregation() { return aggregation.get(0); } + List<Object> getAggregation() { + return aggregation; + } + @Override public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) { List<Object> aggregationObjects = new ArrayList<>(); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MetricInspectionHelper.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MetricInspectionHelper.java new file mode 100644 index 0000000000000..88812cda9899a --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MetricInspectionHelper.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.search.aggregations.metrics; + +import org.elasticsearch.search.aggregations.pipeline.InternalDerivative; + +/** + * Counterpart to {@link org.elasticsearch.search.aggregations.support.AggregationInspectionHelper}, providing + * helpers for some aggs that have package-private getters. AggregationInspectionHelper delegates to these + * helpers when needed, and consumers should prefer to use AggregationInspectionHelper instead of these + * helpers.
+ */ +public class MetricInspectionHelper { + + public static boolean hasValue(InternalAvg agg) { + return agg.getCount() > 0; + } + + public static boolean hasValue(InternalCardinality agg) { + return agg.getCounts() != null; + } + + public static boolean hasValue(InternalHDRPercentileRanks agg) { + return agg.getState().getTotalCount() > 0; + } + + public static boolean hasValue(InternalHDRPercentiles agg) { + return agg.getState().getTotalCount() > 0; + } + + public static boolean hasValue(InternalMedianAbsoluteDeviation agg) { + return agg.getValuesSketch().size() > 0; + } + + public static boolean hasValue(InternalScriptedMetric agg) { + // TODO better way to know if the scripted metric received documents? + // Could check for null too, but a script might return null on purpose... + return agg.getAggregation().size() > 0; + } + + public static boolean hasValue(InternalTDigestPercentileRanks agg) { + return agg.getState().size() > 0; + } + + public static boolean hasValue(InternalTDigestPercentiles agg) { + return agg.getState().size() > 0; + } + + public static boolean hasValue(InternalTopHits agg) { + return (agg.getHits().getTotalHits().value == 0 + && Double.isNaN(agg.getHits().getMaxScore()) + && Double.isNaN(agg.getTopDocs().maxScore)) == false; + } + + public static boolean hasValue(InternalWeightedAvg agg) { + return (agg.getSum() == 0.0 && agg.getWeight() == 0L) == false; + } + + public static boolean hasValue(InternalDerivative agg) { + return true; + } +} diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptPipelineAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptPipelineAggregator.java index d19425e3e0359..996c9b2f0e12f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptPipelineAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptPipelineAggregator.java @@ -114,7 +114,10 @@ public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext } else { final List<InternalAggregation> aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false).map( (p) -> (InternalAggregation) p).collect(Collectors.toList()); - aggs.add(new InternalSimpleValue(name(), returned.doubleValue(), formatter, new ArrayList<>(), metaData())); + + InternalSimpleValue simpleValue = new InternalSimpleValue(name(), returned.doubleValue(), + formatter, new ArrayList<>(), metaData()); + aggs.add(simpleValue); InternalMultiBucketAggregation.InternalBucket newBucket = originalAgg.createBucket(new InternalAggregations(aggs), bucket); newBuckets.add(newBucket); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumPipelineAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumPipelineAggregator.java index a70144b421a48..54e772b0d8e82 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumPipelineAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumPipelineAggregator.java @@ -83,8 +83,7 @@ public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext sum += thisBucketValue; } - List<InternalAggregation> aggs = StreamSupport - .stream(bucket.getAggregations().spliterator(), false) + List<InternalAggregation> aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false) .map((p) -> (InternalAggregation) p) .collect(Collectors.toList()); aggs.add(new
InternalSimpleValue(name(), sum, formatter, new ArrayList<>(), metaData())); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgPipelineAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgPipelineAggregator.java index 10d1cdc5a71cd..1f36c989c163d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgPipelineAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgPipelineAggregator.java @@ -117,7 +117,7 @@ public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext // since we only change newBucket if we can add to it Bucket newBucket = bucket; - if (!(thisBucketValue == null || thisBucketValue.equals(Double.NaN))) { + if ((thisBucketValue == null || thisBucketValue.equals(Double.NaN)) == false) { // Some models (e.g. HoltWinters) have certain preconditions that must be met if (model.hasValue(values.size())) { @@ -126,7 +126,7 @@ public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext List aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false) .map((p) -> (InternalAggregation) p) .collect(Collectors.toList()); - aggs.add(new InternalSimpleValue(name(), movavg, formatter, new ArrayList(), metaData())); + aggs.add(new InternalSimpleValue(name(), movavg, formatter, new ArrayList<>(), metaData())); newBucket = factory.createBucket(factory.getKey(bucket), bucket.getDocCount(), new InternalAggregations(aggs)); } @@ -153,10 +153,10 @@ public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext Bucket bucket = newBuckets.get(lastValidPosition + i + 1); // Get the existing aggs in the bucket so we don't clobber data - aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false).map((p) -> { - return (InternalAggregation) p; - }).collect(Collectors.toList()); - aggs.add(new InternalSimpleValue(name(), predictions[i], formatter, new ArrayList(), metaData())); + aggs = StreamSupport.stream(bucket.getAggregations().spliterator(), false) + .map((p) -> (InternalAggregation) p) + .collect(Collectors.toList()); + aggs.add(new InternalSimpleValue(name(), predictions[i], formatter, new ArrayList<>(), metaData())); Bucket newBucket = factory.createBucket(newKey, bucket.getDocCount(), new InternalAggregations(aggs)); @@ -166,7 +166,7 @@ public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext } else { // Not seen before, create fresh aggs = new ArrayList<>(); - aggs.add(new InternalSimpleValue(name(), predictions[i], formatter, new ArrayList(), metaData())); + aggs.add(new InternalSimpleValue(name(), predictions[i], formatter, new ArrayList<>(), metaData())); Bucket newBucket = factory.createBucket(newKey, 0, new InternalAggregations(aggs)); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationInspectionHelper.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationInspectionHelper.java new file mode 100644 index 0000000000000..c41fa29bde3db --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationInspectionHelper.java @@ -0,0 +1,242 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.search.aggregations.support; + +import org.elasticsearch.search.aggregations.bucket.adjacency.InternalAdjacencyMatrix; +import org.elasticsearch.search.aggregations.bucket.composite.InternalComposite; +import org.elasticsearch.search.aggregations.bucket.filter.InternalFilter; +import org.elasticsearch.search.aggregations.bucket.filter.InternalFilters; +import org.elasticsearch.search.aggregations.bucket.geogrid.InternalGeoHashGrid; +import org.elasticsearch.search.aggregations.bucket.global.InternalGlobal; +import org.elasticsearch.search.aggregations.bucket.histogram.InternalAutoDateHistogram; +import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogram; +import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; +import org.elasticsearch.search.aggregations.bucket.missing.InternalMissing; +import org.elasticsearch.search.aggregations.bucket.nested.InternalNested; +import org.elasticsearch.search.aggregations.bucket.nested.InternalReverseNested; +import org.elasticsearch.search.aggregations.bucket.range.InternalRange; +import org.elasticsearch.search.aggregations.bucket.sampler.InternalSampler; +import org.elasticsearch.search.aggregations.bucket.sampler.UnmappedSampler; +import org.elasticsearch.search.aggregations.bucket.significant.InternalSignificantTerms; +import org.elasticsearch.search.aggregations.bucket.significant.UnmappedSignificantTerms; +import org.elasticsearch.search.aggregations.bucket.terms.InternalTerms; +import org.elasticsearch.search.aggregations.bucket.terms.UnmappedTerms; +import org.elasticsearch.search.aggregations.metrics.InternalAvg; +import org.elasticsearch.search.aggregations.metrics.InternalCardinality; +import org.elasticsearch.search.aggregations.metrics.InternalExtendedStats; +import org.elasticsearch.search.aggregations.metrics.InternalGeoBounds; +import org.elasticsearch.search.aggregations.metrics.InternalGeoCentroid; +import org.elasticsearch.search.aggregations.metrics.InternalHDRPercentileRanks; +import org.elasticsearch.search.aggregations.metrics.InternalHDRPercentiles; +import org.elasticsearch.search.aggregations.metrics.InternalMax; +import org.elasticsearch.search.aggregations.metrics.InternalMedianAbsoluteDeviation; +import org.elasticsearch.search.aggregations.metrics.InternalMin; +import org.elasticsearch.search.aggregations.metrics.InternalScriptedMetric; +import org.elasticsearch.search.aggregations.metrics.InternalStats; +import org.elasticsearch.search.aggregations.metrics.InternalSum; +import org.elasticsearch.search.aggregations.metrics.InternalTDigestPercentileRanks; +import org.elasticsearch.search.aggregations.metrics.InternalTDigestPercentiles; +import org.elasticsearch.search.aggregations.metrics.InternalTopHits; +import org.elasticsearch.search.aggregations.metrics.InternalValueCount; +import org.elasticsearch.search.aggregations.metrics.InternalWeightedAvg; +import 
org.elasticsearch.search.aggregations.metrics.MetricInspectionHelper; +import org.elasticsearch.search.aggregations.pipeline.InternalBucketMetricValue; +import org.elasticsearch.search.aggregations.pipeline.InternalPercentilesBucket; +import org.elasticsearch.search.aggregations.pipeline.InternalSimpleValue; + +import java.util.stream.StreamSupport; + +/** + * Provides a set of static helpers to determine if a particular type of InternalAggregation "has a value" + * or not. This can be difficult to determine from an external perspective because each agg uses + * different internal bookkeeping to determine if it is empty or not (NaN, +/-Inf, 0.0, etc). + * + * This set of helpers aims to ease that task by codifying what "empty" is for each agg. + * + * It is not entirely accurate for all aggs, since some do not expose or track the needed state + * (e.g. sum doesn't record count, so it's not clear if the sum is 0.0 because it is empty + * or because of summing to zero). Pipeline aggs in particular are not well supported + * by these helpers since most share InternalSimpleValue and it's not clear which pipeline + * generated the value. + */ +public class AggregationInspectionHelper { + public static <A extends InternalTerms<A, B>, B extends InternalTerms.Bucket<B>> boolean hasValue(InternalTerms<A, B> agg) { + return agg.getBuckets().stream().anyMatch(bucket -> bucket.getDocCount() > 0); + } + + public static boolean hasValue(UnmappedTerms agg) { + return false; + } + + public static boolean hasValue(UnmappedSignificantTerms agg) { + return false; + } + + public static boolean hasValue(UnmappedSampler agg) { + return false; + } + + public static boolean hasValue(InternalAdjacencyMatrix agg) { + return agg.getBuckets().stream().anyMatch(bucket -> bucket.getDocCount() > 0); + } + + public static boolean hasValue(InternalFilters agg) { + return agg.getBuckets().stream().anyMatch(bucket -> bucket.getDocCount() > 0); + } + + public static boolean hasValue(InternalFilter agg) { + return agg.getDocCount() > 0; + } + + public static boolean hasValue(InternalGeoHashGrid agg) { + return agg.getBuckets().stream().anyMatch(bucket -> bucket.getDocCount() > 0); + } + + public static boolean hasValue(InternalGlobal agg) { + return agg.getDocCount() > 0; + } + + public static boolean hasValue(InternalHistogram agg) { + return agg.getBuckets().stream().anyMatch(bucket -> bucket.getDocCount() > 0); + } + + public static boolean hasValue(InternalDateHistogram agg) { + return agg.getBuckets().stream().anyMatch(bucket -> bucket.getDocCount() > 0); + } + + public static boolean hasValue(InternalAutoDateHistogram agg) { + return agg.getBuckets().stream().anyMatch(bucket -> bucket.getDocCount() > 0); + } + + public static boolean hasValue(InternalComposite agg) { + return agg.getBuckets().stream().anyMatch(bucket -> bucket.getDocCount() > 0); + } + + public static boolean hasValue(InternalMissing agg) { + return agg.getDocCount() > 0; + } + + public static boolean hasValue(InternalNested agg) { + return agg.getDocCount() > 0; + } + + public static boolean hasValue(InternalReverseNested agg) { + return agg.getDocCount() > 0; + } + + public static <B extends InternalRange.Bucket, R extends InternalRange<B, R>> boolean hasValue(InternalRange<B, R> agg) { + return agg.getBuckets().stream().anyMatch(bucket -> bucket.getDocCount() > 0); + } + + public static boolean hasValue(InternalSampler agg) { + return agg.getDocCount() > 0; + } + + public static <A extends InternalSignificantTerms<A, B>, + B extends InternalSignificantTerms.Bucket<B>> boolean hasValue(InternalSignificantTerms<A, B> agg) { + return agg.getBuckets().stream().anyMatch(bucket -> bucket.getDocCount() > 0); + }
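/* Editorial sketch, hypothetical and not part of the patch: how a caller is expected
   to use the overloads above. Resolve the concrete Internal* type first, then ask the
   helper whether the agg actually saw data. The method name, the InternalFilter choice,
   and the output map are illustrative only; any overload works the same way. */
private static void addIfNonEmpty(InternalFilter agg, java.util.Map<String, Long> out) {
    // hasValue(InternalFilter) is defined above: true iff the filter matched any docs
    if (AggregationInspectionHelper.hasValue(agg)) {
        out.put(agg.getName(), agg.getDocCount());
    }
}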
+ + public static boolean hasValue(InternalAvg agg) { + return MetricInspectionHelper.hasValue(agg); + } + + public static boolean hasValue(InternalSum agg) { + // TODO this could be incorrect... e.g. +1 + -1 + return agg.getValue() != 0.0; + } + + public static boolean hasValue(InternalCardinality agg) { + return MetricInspectionHelper.hasValue(agg); + } + + public static boolean hasValue(InternalExtendedStats agg) { + return agg.getCount() > 0; + } + + public static boolean hasValue(InternalGeoBounds agg) { + return (agg.topLeft() == null && agg.bottomRight() == null) == false; + } + + public static boolean hasValue(InternalGeoCentroid agg) { + return agg.centroid() != null && agg.count() > 0; + } + + public static boolean hasValue(InternalHDRPercentileRanks agg) { + return MetricInspectionHelper.hasValue(agg); + } + + public static boolean hasValue(InternalHDRPercentiles agg) { + return MetricInspectionHelper.hasValue(agg); + } + + public static boolean hasValue(InternalMax agg) { + return agg.getValue() != Double.NEGATIVE_INFINITY; + } + + public static boolean hasValue(InternalMedianAbsoluteDeviation agg) { + return MetricInspectionHelper.hasValue(agg); + } + + public static boolean hasValue(InternalMin agg) { + return agg.getValue() != Double.POSITIVE_INFINITY; + } + + public static boolean hasValue(InternalScriptedMetric agg) { + return MetricInspectionHelper.hasValue(agg); + } + + public static boolean hasValue(InternalStats agg) { + return agg.getCount() > 0; + } + + public static boolean hasValue(InternalTDigestPercentileRanks agg) { + return MetricInspectionHelper.hasValue(agg); + } + + public static boolean hasValue(InternalTDigestPercentiles agg) { + return MetricInspectionHelper.hasValue(agg); + } + + public static boolean hasValue(InternalTopHits agg) { + return MetricInspectionHelper.hasValue(agg); + } + + public static boolean hasValue(InternalValueCount agg) { + return agg.getValue() > 0; + } + + public static boolean hasValue(InternalWeightedAvg agg) { + return MetricInspectionHelper.hasValue(agg); + } + + public static boolean hasValue(InternalSimpleValue agg) { + // This is a coarse approximation, since some aggs use positive/negative infinity or NaN + return (Double.isInfinite(agg.getValue()) || Double.isNaN(agg.getValue())) == false; + } + + public static boolean hasValue(InternalBucketMetricValue agg) { + return Double.isInfinite(agg.value()) == false; + } + + public static boolean hasValue(InternalPercentilesBucket agg) { + return StreamSupport.stream(agg.spliterator(), false).allMatch(p -> Double.isNaN(p.getValue())) == false; + } + +} diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorTests.java index d92fb7ff62e43..f5b5d187e4187 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.junit.Before; public class FilterAggregatorTests extends AggregatorTestCase { @@ -57,6 +58,7 @@ public void testEmpty() throws Exception { InternalFilter response = search(indexSearcher, new 
MatchAllDocsQuery(), builder, fieldType); assertEquals(response.getDocCount(), 0); + assertFalse(AggregationInspectionHelper.hasValue(response)); indexReader.close(); directory.close(); } @@ -96,6 +98,11 @@ public void testRandom() throws Exception { response = search(indexSearcher, new MatchAllDocsQuery(), builder, fieldType); } assertEquals(response.getDocCount(), (long) expectedBucketCount[value]); + if (expectedBucketCount[value] > 0) { + assertTrue(AggregationInspectionHelper.hasValue(response)); + } else { + assertFalse(AggregationInspectionHelper.hasValue(response)); + } } indexReader.close(); directory.close(); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java index 05ed091978270..ff5cb84482db0 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.junit.Before; import java.util.HashSet; @@ -68,6 +69,7 @@ public void testEmpty() throws Exception { for (InternalFilters.InternalBucket filter : response.getBuckets()) { assertEquals(filter.getDocCount(), 0); } + assertFalse(AggregationInspectionHelper.hasValue(response)); indexReader.close(); directory.close(); } @@ -129,6 +131,7 @@ public void testKeyedFilter() throws Exception { assertEquals(filters.getBucketByKey("bar").getDocCount(), 1); assertEquals(filters.getBucketByKey("same").getDocCount(), 1); assertEquals(filters.getBucketByKey("other").getDocCount(), 2); + assertTrue(AggregationInspectionHelper.hasValue(filters)); } indexReader.close(); @@ -185,14 +188,22 @@ public void testRandom() throws Exception { List<InternalFilters.InternalBucket> buckets = response.getBuckets(); assertEquals(buckets.size(), filters.length+1); + int sum = 0; for (InternalFilters.InternalBucket bucket : buckets) { if ("other".equals(bucket.getKey())) { assertEquals(bucket.getDocCount(), expectedOtherCount); } else { int index = Integer.parseInt(bucket.getKey()); assertEquals(bucket.getDocCount(), (long) expectedBucketCount[filterTerms[index]]); + sum += expectedBucketCount[filterTerms[index]]; } } + if (sum > 0) { + assertTrue(AggregationInspectionHelper.hasValue(response)); + } else { + assertFalse(AggregationInspectionHelper.hasValue(response)); + } + } indexReader.close(); directory.close(); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java index 45b6b64cddc35..2d270f8298ff1 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregatorTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import
java.io.IOException; import java.util.ArrayList; @@ -53,6 +54,7 @@ public void testNoDocs() throws IOException { // Intentionally not writing any docs }, geoHashGrid -> { assertEquals(0, geoHashGrid.getBuckets().size()); + assertFalse(AggregationInspectionHelper.hasValue(geoHashGrid)); }); } @@ -61,6 +63,7 @@ public void testFieldMissing() throws IOException { iw.addDocument(Collections.singleton(new LatLonDocValuesField(FIELD_NAME, 10D, 10D))); }, geoHashGrid -> { assertEquals(0, geoHashGrid.getBuckets().size()); + assertFalse(AggregationInspectionHelper.hasValue(geoHashGrid)); }); } @@ -94,6 +97,7 @@ public void testWithSeveralDocs() throws IOException { for (GeoHashGrid.Bucket bucket : geoHashGrid.getBuckets()) { assertEquals((long) expectedCountPerGeoHash.get(bucket.getKeyAsString()), bucket.getDocCount()); } + assertTrue(AggregationInspectionHelper.hasValue(geoHashGrid)); }); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java index 2d5109405dc1c..6b4d1482adb5e 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java @@ -38,7 +38,8 @@ import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorTestCase; import org.elasticsearch.search.aggregations.MultiBucketConsumerService; -import org.elasticsearch.search.aggregations.metrics.Stats; +import org.elasticsearch.search.aggregations.metrics.InternalStats; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.hamcrest.Matchers; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; @@ -75,18 +76,27 @@ public class AutoDateHistogramAggregatorTests extends AggregatorTestCase { public void testMatchNoDocs() throws IOException { testBothCases(new MatchNoDocsQuery(), DATES_WITH_TIME, aggregation -> aggregation.setNumBuckets(10).field(DATE_FIELD), - histogram -> assertEquals(0, histogram.getBuckets().size()) + histogram -> { + assertEquals(0, histogram.getBuckets().size()); + assertFalse(AggregationInspectionHelper.hasValue(histogram)); + } ); } public void testMatchAllDocs() throws IOException { testSearchCase(DEFAULT_QUERY, DATES_WITH_TIME, aggregation -> aggregation.setNumBuckets(6).field(DATE_FIELD), - histogram -> assertEquals(10, histogram.getBuckets().size()) + histogram -> { + assertEquals(10, histogram.getBuckets().size()); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); + } ); testSearchAndReduceCase(DEFAULT_QUERY, DATES_WITH_TIME, aggregation -> aggregation.setNumBuckets(8).field(DATE_FIELD), - histogram -> assertEquals(8, histogram.getBuckets().size()) + histogram -> { + assertEquals(8, histogram.getBuckets().size()); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); + } ); } @@ -95,16 +105,18 @@ public void testSubAggregations() throws IOException { aggregation -> aggregation.setNumBuckets(8).field(DATE_FIELD) .subAggregation(AggregationBuilders.stats("stats").field(DATE_FIELD)), histogram -> { + assertTrue(AggregationInspectionHelper.hasValue(histogram)); final List buckets = histogram.getBuckets(); assertEquals(8, buckets.size()); Histogram.Bucket bucket = buckets.get(0); assertEquals("2010-01-01T00:00:00.000Z", 
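/* expected key: the first yearly bucket of the auto date histogram */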
bucket.getKeyAsString()); assertEquals(2, bucket.getDocCount()); - Stats stats = bucket.getAggregations().get("stats"); + InternalStats stats = bucket.getAggregations().get("stats"); assertEquals("2010-03-12T01:07:45.000Z", stats.getMinAsString()); assertEquals("2010-04-27T03:43:34.000Z", stats.getMaxAsString()); assertEquals(2L, stats.getCount()); + assertTrue(AggregationInspectionHelper.hasValue(stats)); bucket = buckets.get(1); assertEquals("2011-01-01T00:00:00.000Z", bucket.getKeyAsString()); @@ -113,6 +125,7 @@ public void testSubAggregations() throws IOException { assertTrue(Double.isInfinite(stats.getMin())); assertTrue(Double.isInfinite(stats.getMax())); assertEquals(0L, stats.getCount()); + assertFalse(AggregationInspectionHelper.hasValue(stats)); bucket = buckets.get(2); assertEquals("2012-01-01T00:00:00.000Z", bucket.getKeyAsString()); @@ -121,6 +134,7 @@ public void testSubAggregations() throws IOException { assertEquals("2012-05-18T04:11:00.000Z", stats.getMinAsString()); assertEquals("2012-05-18T04:11:00.000Z", stats.getMaxAsString()); assertEquals(1L, stats.getCount()); + assertTrue(AggregationInspectionHelper.hasValue(stats)); bucket = buckets.get(3); assertEquals("2013-01-01T00:00:00.000Z", bucket.getKeyAsString()); @@ -129,6 +143,7 @@ public void testSubAggregations() throws IOException { assertEquals("2013-05-29T05:11:31.000Z", stats.getMinAsString()); assertEquals("2013-10-31T08:24:05.000Z", stats.getMaxAsString()); assertEquals(2L, stats.getCount()); + assertTrue(AggregationInspectionHelper.hasValue(stats)); bucket = buckets.get(4); assertEquals("2014-01-01T00:00:00.000Z", bucket.getKeyAsString()); @@ -137,6 +152,7 @@ public void testSubAggregations() throws IOException { assertTrue(Double.isInfinite(stats.getMin())); assertTrue(Double.isInfinite(stats.getMax())); assertEquals(0L, stats.getCount()); + assertFalse(AggregationInspectionHelper.hasValue(stats)); bucket = buckets.get(5); assertEquals("2015-01-01T00:00:00.000Z", bucket.getKeyAsString()); @@ -145,6 +161,7 @@ public void testSubAggregations() throws IOException { assertEquals("2015-02-13T13:09:32.000Z", stats.getMinAsString()); assertEquals("2015-11-13T16:14:34.000Z", stats.getMaxAsString()); assertEquals(3L, stats.getCount()); + assertTrue(AggregationInspectionHelper.hasValue(stats)); bucket = buckets.get(6); assertEquals("2016-01-01T00:00:00.000Z", bucket.getKeyAsString()); @@ -153,6 +170,7 @@ public void testSubAggregations() throws IOException { assertEquals("2016-03-04T17:09:50.000Z", stats.getMinAsString()); assertEquals("2016-03-04T17:09:50.000Z", stats.getMaxAsString()); assertEquals(1L, stats.getCount()); + assertTrue(AggregationInspectionHelper.hasValue(stats)); bucket = buckets.get(7); assertEquals("2017-01-01T00:00:00.000Z", bucket.getKeyAsString()); @@ -161,6 +179,7 @@ public void testSubAggregations() throws IOException { assertEquals("2017-12-12T22:55:46.000Z", stats.getMinAsString()); assertEquals("2017-12-12T22:55:46.000Z", stats.getMaxAsString()); assertEquals(1L, stats.getCount()); + assertTrue(AggregationInspectionHelper.hasValue(stats)); }); } @@ -169,7 +188,10 @@ public void testNoDocs() throws IOException { final Consumer aggregation = agg -> agg.setNumBuckets(10).field(DATE_FIELD); testSearchCase(DEFAULT_QUERY, dates, aggregation, - histogram -> assertEquals(0, histogram.getBuckets().size()) + histogram -> { + assertEquals(0, histogram.getBuckets().size()); + assertFalse(AggregationInspectionHelper.hasValue(histogram)); + } ); testSearchAndReduceCase(DEFAULT_QUERY, dates, aggregation, 
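/* search-and-reduce over an empty index reduces to a null histogram, hence Assert::assertNull rather than an empty-bucket check */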
Assert::assertNull @@ -179,7 +201,10 @@ public void testNoDocs() throws IOException { public void testAggregateWrongField() throws IOException { testBothCases(DEFAULT_QUERY, DATES_WITH_TIME, aggregation -> aggregation.setNumBuckets(10).field("wrong_field"), - histogram -> assertEquals(0, histogram.getBuckets().size()) + histogram -> { + assertEquals(0, histogram.getBuckets().size()); + assertFalse(AggregationInspectionHelper.hasValue(histogram)); + } ); } @@ -197,6 +222,7 @@ public void testIntervalYear() throws IOException { assertEquals(DATES_WITH_TIME.get(5 + i), bucket.getKey()); assertEquals(1, bucket.getDocCount()); } + assertTrue(AggregationInspectionHelper.hasValue(histogram)); } ); testSearchAndReduceCase(rangeQuery, DATES_WITH_TIME, @@ -211,6 +237,7 @@ public void testIntervalYear() throws IOException { assertEquals(expectedDocCount.size(), buckets.size()); buckets.forEach(bucket -> assertEquals(expectedDocCount.getOrDefault(bucket.getKey(), 0).longValue(), bucket.getDocCount())); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); } ); } @@ -244,6 +271,7 @@ public void testIntervalMonth() throws IOException { assertEquals(expectedDocCount.size(), buckets.size()); buckets.forEach(bucket -> assertEquals(expectedDocCount.getOrDefault(bucket.getKey(), 0).longValue(), bucket.getDocCount())); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); } ); } @@ -287,6 +315,7 @@ public void testIntervalDay() throws IOException { assertEquals(5, buckets.size()); buckets.forEach(bucket -> assertEquals(expectedDocCount.getOrDefault(bucket.getKey(), 0).longValue(), bucket.getDocCount())); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); } ); } @@ -311,6 +340,7 @@ public void testIntervalDayWithTZ() throws IOException { assertEquals(expectedDocCount.size(), buckets.size()); buckets.forEach(bucket -> assertEquals(expectedDocCount.getOrDefault(bucket.getKeyAsString(), 0).longValue(), bucket.getDocCount())); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); }); testSearchAndReduceCase(DEFAULT_QUERY, datesForDayInterval, aggregation -> aggregation.setNumBuckets(5).field(DATE_FIELD).timeZone(DateTimeZone.forOffsetHours(-1)), histogram -> { @@ -323,6 +353,7 @@ public void testIntervalDayWithTZ() throws IOException { assertEquals(5, buckets.size()); buckets.forEach(bucket -> assertEquals(expectedDocCount.getOrDefault(bucket.getKeyAsString(), 0).longValue(), bucket.getDocCount())); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); }); } @@ -683,19 +714,19 @@ public void testIntervalSecond() throws IOException { private void testSearchCase(final Query query, final List dataset, final Consumer configure, - final Consumer verify) throws IOException { + final Consumer verify) throws IOException { executeTestCase(false, query, dataset, configure, verify); } private void testSearchAndReduceCase(final Query query, final List dataset, final Consumer configure, - final Consumer verify) throws IOException { + final Consumer verify) throws IOException { executeTestCase(true, query, dataset, configure, verify); } private void testBothCases(final Query query, final List dataset, final Consumer configure, - final Consumer verify) throws IOException { + final Consumer verify) throws IOException { executeTestCase(false, query, dataset, configure, verify); executeTestCase(true, query, dataset, configure, verify); } @@ -716,7 +747,7 @@ protected IndexSettings createIndexSettings() { private void executeTestCase(final boolean reduced, final Query query, final List 
dataset, final Consumer configure, - final Consumer verify) throws IOException { + final Consumer verify) throws IOException { try (Directory directory = newDirectory()) { try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { final Document document = new Document(); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java index 4e6004444a63e..c1b9396664a22 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorTestCase; import org.elasticsearch.search.aggregations.MultiBucketConsumerService.TooManyBucketsException; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import java.io.IOException; import java.util.Arrays; @@ -61,7 +62,10 @@ public class DateHistogramAggregatorTests extends AggregatorTestCase { public void testMatchNoDocs() throws IOException { testBothCases(new MatchNoDocsQuery(), dataset, aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.YEAR).field(DATE_FIELD), - histogram -> assertEquals(0, histogram.getBuckets().size()) + histogram -> { + assertEquals(0, histogram.getBuckets().size()); + assertFalse(AggregationInspectionHelper.hasValue(histogram)); + } ); } @@ -70,15 +74,24 @@ public void testMatchAllDocs() throws IOException { testSearchCase(query, dataset, aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.YEAR).field(DATE_FIELD), - histogram -> assertEquals(6, histogram.getBuckets().size()) + histogram -> { + assertEquals(6, histogram.getBuckets().size()); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); + } ); testSearchAndReduceCase(query, dataset, aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.YEAR).field(DATE_FIELD), - histogram -> assertEquals(8, histogram.getBuckets().size()) + histogram -> { + assertEquals(8, histogram.getBuckets().size()); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); + } ); testBothCases(query, dataset, aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.YEAR).field(DATE_FIELD).minDocCount(1L), - histogram -> assertEquals(6, histogram.getBuckets().size()) + histogram -> { + assertEquals(6, histogram.getBuckets().size()); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); + } ); } @@ -89,7 +102,10 @@ public void testNoDocs() throws IOException { agg.dateHistogramInterval(DateHistogramInterval.YEAR).field(DATE_FIELD); testSearchCase(query, dates, aggregation, - histogram -> assertEquals(0, histogram.getBuckets().size()) + histogram -> { + assertEquals(0, histogram.getBuckets().size()); + assertFalse(AggregationInspectionHelper.hasValue(histogram)); + } ); testSearchAndReduceCase(query, dates, aggregation, histogram -> assertNull(histogram) @@ -99,7 +115,10 @@ public void testNoDocs() throws IOException { public void testAggregateWrongField() throws IOException { testBothCases(new MatchAllDocsQuery(), dataset, aggregation -> aggregation.dateHistogramInterval(DateHistogramInterval.YEAR).field("wrong_field"), - histogram -> assertEquals(0, histogram.getBuckets().size()) + 
histogram -> { + assertEquals(0, histogram.getBuckets().size()); + assertFalse(AggregationInspectionHelper.hasValue(histogram)); + } ); } @@ -371,39 +390,39 @@ public void testMaxBucket() throws IOException { private void testSearchCase(Query query, List dataset, Consumer configure, - Consumer verify) throws IOException { + Consumer verify) throws IOException { testSearchCase(query, dataset, configure, verify, 10000); } private void testSearchCase(Query query, List dataset, Consumer configure, - Consumer verify, + Consumer verify, int maxBucket) throws IOException { executeTestCase(false, query, dataset, configure, verify, maxBucket); } private void testSearchAndReduceCase(Query query, List dataset, Consumer configure, - Consumer verify) throws IOException { + Consumer verify) throws IOException { testSearchAndReduceCase(query, dataset, configure, verify, 1000); } private void testSearchAndReduceCase(Query query, List dataset, Consumer configure, - Consumer verify, + Consumer verify, int maxBucket) throws IOException { executeTestCase(true, query, dataset, configure, verify, maxBucket); } private void testBothCases(Query query, List dataset, Consumer configure, - Consumer verify) throws IOException { + Consumer verify) throws IOException { testBothCases(query, dataset, configure, verify, 10000); } private void testBothCases(Query query, List dataset, Consumer configure, - Consumer verify, + Consumer verify, int maxBucket) throws IOException { testSearchCase(query, dataset, configure, verify, maxBucket); testSearchAndReduceCase(query, dataset, configure, verify, maxBucket); @@ -411,7 +430,7 @@ private void testBothCases(Query query, List dataset, private void executeTestCase(boolean reduced, Query query, List dataset, Consumer configure, - Consumer verify, + Consumer verify, int maxBucket) throws IOException { try (Directory directory = newDirectory()) { diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorTests.java index 81ffd1c78a3f7..bdb99a4971ac1 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregatorTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; public class HistogramAggregatorTests extends AggregatorTestCase { @@ -49,7 +50,7 @@ public void testLongs() throws Exception { fieldType.setName("field"); try (IndexReader reader = w.getReader()) { IndexSearcher searcher = new IndexSearcher(reader); - Histogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); assertEquals(4, histogram.getBuckets().size()); assertEquals(-10d, histogram.getBuckets().get(0).getKey()); assertEquals(2, histogram.getBuckets().get(0).getDocCount()); @@ -59,6 +60,7 @@ public void testLongs() throws Exception { assertEquals(2, histogram.getBuckets().get(2).getDocCount()); assertEquals(50d, histogram.getBuckets().get(3).getKey()); assertEquals(1, histogram.getBuckets().get(3).getDocCount()); + 
assertTrue(AggregationInspectionHelper.hasValue(histogram)); } } } @@ -79,7 +81,7 @@ public void testDoubles() throws Exception { fieldType.setName("field"); try (IndexReader reader = w.getReader()) { IndexSearcher searcher = new IndexSearcher(reader); - Histogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); assertEquals(4, histogram.getBuckets().size()); assertEquals(-10d, histogram.getBuckets().get(0).getKey()); assertEquals(2, histogram.getBuckets().get(0).getDocCount()); @@ -89,6 +91,7 @@ public void testDoubles() throws Exception { assertEquals(2, histogram.getBuckets().get(2).getDocCount()); assertEquals(50d, histogram.getBuckets().get(3).getKey()); assertEquals(1, histogram.getBuckets().get(3).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); } } } @@ -109,7 +112,7 @@ public void testIrrationalInterval() throws Exception { fieldType.setName("field"); try (IndexReader reader = w.getReader()) { IndexSearcher searcher = new IndexSearcher(reader); - Histogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); assertEquals(4, histogram.getBuckets().size()); assertEquals(-4 * Math.PI, histogram.getBuckets().get(0).getKey()); assertEquals(1, histogram.getBuckets().get(0).getDocCount()); @@ -119,6 +122,7 @@ public void testIrrationalInterval() throws Exception { assertEquals(2, histogram.getBuckets().get(2).getDocCount()); assertEquals(Math.PI, histogram.getBuckets().get(3).getKey()); assertEquals(1, histogram.getBuckets().get(3).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); } } } @@ -140,12 +144,13 @@ public void testMinDocCount() throws Exception { fieldType.setName("field"); try (IndexReader reader = w.getReader()) { IndexSearcher searcher = new IndexSearcher(reader); - Histogram histogram = searchAndReduce(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + InternalHistogram histogram = searchAndReduce(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); assertEquals(2, histogram.getBuckets().size()); assertEquals(-10d, histogram.getBuckets().get(0).getKey()); assertEquals(2, histogram.getBuckets().get(0).getDocCount()); assertEquals(0d, histogram.getBuckets().get(1).getKey()); assertEquals(3, histogram.getBuckets().get(1).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); } } } @@ -168,7 +173,7 @@ public void testMissing() throws Exception { fieldType.setName("field"); try (IndexReader reader = w.getReader()) { IndexSearcher searcher = new IndexSearcher(reader); - Histogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); assertEquals(4, histogram.getBuckets().size()); assertEquals(-10d, histogram.getBuckets().get(0).getKey()); assertEquals(2, histogram.getBuckets().get(0).getDocCount()); @@ -178,6 +183,7 @@ public void testMissing() throws Exception { assertEquals(2, histogram.getBuckets().get(2).getDocCount()); assertEquals(50d, histogram.getBuckets().get(3).getKey()); assertEquals(1, histogram.getBuckets().get(3).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); } } } @@ -199,7 +205,7 @@ public void testOffset() throws Exception { fieldType.setName("field"); try (IndexReader 
reader = w.getReader()) { IndexSearcher searcher = new IndexSearcher(reader); - Histogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + InternalHistogram histogram = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); assertEquals(3, histogram.getBuckets().size()); assertEquals(-10 + Math.PI, histogram.getBuckets().get(0).getKey()); assertEquals(2, histogram.getBuckets().get(0).getDocCount()); @@ -207,6 +213,7 @@ public void testOffset() throws Exception { assertEquals(2, histogram.getBuckets().get(1).getDocCount()); assertEquals(5 + Math.PI, histogram.getBuckets().get(2).getKey()); assertEquals(1, histogram.getBuckets().get(2).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); } } } @@ -228,7 +235,7 @@ public void testExtendedBounds() throws Exception { fieldType.setName("field"); try (IndexReader reader = w.getReader()) { IndexSearcher searcher = new IndexSearcher(reader); - Histogram histogram = searchAndReduce(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); + InternalHistogram histogram = searchAndReduce(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); assertEquals(6, histogram.getBuckets().size()); assertEquals(-15d, histogram.getBuckets().get(0).getKey()); assertEquals(0, histogram.getBuckets().get(0).getDocCount()); @@ -242,6 +249,7 @@ public void testExtendedBounds() throws Exception { assertEquals(0, histogram.getBuckets().get(4).getDocCount()); assertEquals(10d, histogram.getBuckets().get(5).getKey()); assertEquals(0, histogram.getBuckets().get(5).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue(histogram)); } } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregatorTests.java index 424c3aed2105d..daaeb94d8fae9 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregatorTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.elasticsearch.search.aggregations.support.ValueType; import java.io.IOException; @@ -45,7 +46,10 @@ public void testMatchNoDocs() throws IOException { "field", Queries.newMatchAllQuery(), doc -> doc.add(new SortedNumericDocValuesField("field", randomLong())), - internalMissing -> assertEquals(internalMissing.getDocCount(), 0)); + internalMissing -> { + assertEquals(internalMissing.getDocCount(), 0); + assertFalse(AggregationInspectionHelper.hasValue(internalMissing)); + }); } public void testMatchAllDocs() throws IOException { @@ -54,7 +58,10 @@ public void testMatchAllDocs() throws IOException { "field", Queries.newMatchAllQuery(), doc -> doc.add(new SortedNumericDocValuesField("another_field", randomLong())), - internalMissing -> assertEquals(internalMissing.getDocCount(), numDocs)); + internalMissing -> { + assertEquals(internalMissing.getDocCount(), numDocs); + assertTrue(AggregationInspectionHelper.hasValue(internalMissing)); + }); } public void testMatchSparse() throws IOException { @@ -74,6 +81,7 @@ public void testMatchSparse() throws IOException { internalMissing -> { 
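/* count accumulated the docs indexed without "field", so the missing agg should report that many docs and register as non-empty with the inspection helper */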
assertEquals(internalMissing.getDocCount(), count.get()); count.set(0); + assertTrue(AggregationInspectionHelper.hasValue(internalMissing)); }); } @@ -87,6 +95,7 @@ public void testMissingField() throws IOException { }, internalMissing -> { assertEquals(internalMissing.getDocCount(), numDocs); + assertTrue(AggregationInspectionHelper.hasValue(internalMissing)); }); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java index e9911a7034c2f..1eef8de86b304 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTests.java @@ -63,6 +63,7 @@ import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.InternalSum; import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.test.VersionUtils; @@ -115,16 +116,16 @@ public void testNoDocs() throws IOException { NumberFieldMapper.NumberType.LONG); fieldType.setName(VALUE_FIELD_NAME); - Nested nested = search(newSearcher(indexReader, false, true), + InternalNested nested = search(newSearcher(indexReader, false, true), new MatchAllDocsQuery(), nestedBuilder, fieldType); assertEquals(NESTED_AGG, nested.getName()); assertEquals(0, nested.getDocCount()); - InternalMax max = (InternalMax) - ((InternalAggregation)nested).getProperty(MAX_AGG_NAME); + InternalMax max = (InternalMax) nested.getProperty(MAX_AGG_NAME); assertEquals(MAX_AGG_NAME, max.getName()); assertEquals(Double.NEGATIVE_INFINITY, max.getValue(), Double.MIN_VALUE); + assertFalse(AggregationInspectionHelper.hasValue(nested)); } } } @@ -162,17 +163,18 @@ public void testSingleNestingMax() throws IOException { NumberFieldMapper.NumberType.LONG); fieldType.setName(VALUE_FIELD_NAME); - Nested nested = search(newSearcher(indexReader, false, true), + InternalNested nested = search(newSearcher(indexReader, false, true), new MatchAllDocsQuery(), nestedBuilder, fieldType); assertEquals(expectedNestedDocs, nested.getDocCount()); assertEquals(NESTED_AGG, nested.getName()); assertEquals(expectedNestedDocs, nested.getDocCount()); - InternalMax max = (InternalMax) - ((InternalAggregation)nested).getProperty(MAX_AGG_NAME); + InternalMax max = (InternalMax) nested.getProperty(MAX_AGG_NAME); assertEquals(MAX_AGG_NAME, max.getName()); assertEquals(expectedMaxValue, max.getValue(), Double.MIN_VALUE); + + assertTrue(AggregationInspectionHelper.hasValue(nested)); } } } @@ -211,17 +213,18 @@ public void testDoubleNestingMax() throws IOException { NumberFieldMapper.NumberType.LONG); fieldType.setName(VALUE_FIELD_NAME); - Nested nested = search(newSearcher(indexReader, false, true), + InternalNested nested = search(newSearcher(indexReader, false, true), new MatchAllDocsQuery(), nestedBuilder, fieldType); assertEquals(expectedNestedDocs, nested.getDocCount()); assertEquals(NESTED_AGG, nested.getName()); assertEquals(expectedNestedDocs, nested.getDocCount()); - InternalMax max = (InternalMax) - ((InternalAggregation)nested).getProperty(MAX_AGG_NAME); + InternalMax max = (InternalMax) nested.getProperty(MAX_AGG_NAME); assertEquals(MAX_AGG_NAME, max.getName()); 
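/* expectedMaxValue was tracked while generating the nested docs; a populated max sub-agg is also why the enclosing nested agg reports hasValue(...) == true below */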
assertEquals(expectedMaxValue, max.getValue(), Double.MIN_VALUE); + + assertTrue(AggregationInspectionHelper.hasValue(nested)); } } } @@ -263,7 +266,7 @@ public void testOrphanedDocs() throws IOException { NumberFieldMapper.NumberType.LONG); fieldType.setName(VALUE_FIELD_NAME); - Nested nested = search(newSearcher(indexReader, false, true), + InternalNested nested = search(newSearcher(indexReader, false, true), new MatchAllDocsQuery(), nestedBuilder, fieldType); assertEquals(expectedNestedDocs, nested.getDocCount()); @@ -348,13 +351,15 @@ public void testResetRootDocId() throws Exception { bq.add(Queries.newNonNestedFilter(VersionUtils.randomVersion(random())), BooleanClause.Occur.MUST); bq.add(new TermQuery(new Term(IdFieldMapper.NAME, Uid.encodeId("2"))), BooleanClause.Occur.MUST_NOT); - Nested nested = search(newSearcher(indexReader, false, true), + InternalNested nested = search(newSearcher(indexReader, false, true), new ConstantScoreQuery(bq.build()), nestedBuilder, fieldType); assertEquals(NESTED_AGG, nested.getName()); // The bug manifests if 6 docs are returned, because currentRootDoc isn't reset the previous child docs from the first // segment are emitted as hits. assertEquals(4L, nested.getDocCount()); + + assertTrue(AggregationInspectionHelper.hasValue(nested)); } } } @@ -630,7 +635,7 @@ public void testPreGetChildLeafCollectors() throws IOException { assertEquals("filterAgg", filter.getName()); assertEquals(3L, filter.getDocCount()); - Nested nested = filter.getAggregations().get(NESTED_AGG); + InternalNested nested = filter.getAggregations().get(NESTED_AGG); assertEquals(6L, nested.getDocCount()); StringTerms keyAgg = nested.getAggregations().get("key"); @@ -687,7 +692,7 @@ public void testFieldAlias() throws IOException { NestedAggregationBuilder aliasAgg = nested(NESTED_AGG, NESTED_OBJECT).subAggregation( max(MAX_AGG_NAME).field(VALUE_FIELD_NAME + "-alias")); - Nested nested = search(newSearcher(indexReader, false, true), + InternalNested nested = search(newSearcher(indexReader, false, true), new MatchAllDocsQuery(), agg, fieldType); Nested aliasNested = search(newSearcher(indexReader, false, true), new MatchAllDocsQuery(), aliasAgg, fieldType); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregatorTests.java index e446dfb3d2b9a..52988764d3bc3 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregatorTests.java @@ -39,6 +39,7 @@ import org.elasticsearch.search.aggregations.AggregatorTestCase; import org.elasticsearch.search.aggregations.metrics.Min; import org.elasticsearch.search.aggregations.metrics.MinAggregationBuilder; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import java.io.IOException; @@ -75,10 +76,11 @@ public void testSampler() throws IOException { try (IndexReader reader = DirectoryReader.open(w)) { assertEquals("test expects a single segment", 1, reader.leaves().size()); IndexSearcher searcher = new IndexSearcher(reader); - Sampler sampler = searchAndReduce(searcher, new TermQuery(new Term("text", "good")), aggBuilder, textFieldType, + InternalSampler sampler = searchAndReduce(searcher, new TermQuery(new Term("text", "good")), aggBuilder, textFieldType, numericFieldType); Min min = sampler.getAggregations().get("min"); 
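/* the min is computed only over documents the sampler retained; because the sampler collected hits, the inspection helper should report it as non-empty */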
assertEquals(5.0, min.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(sampler)); } } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregatorTests.java index dbff6daed6285..123bb0f60427c 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTextAggregatorTests.java @@ -37,8 +37,9 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.TextFieldMapper.TextFieldType; import org.elasticsearch.search.aggregations.AggregatorTestCase; -import org.elasticsearch.search.aggregations.bucket.sampler.Sampler; +import org.elasticsearch.search.aggregations.bucket.sampler.InternalSampler; import org.elasticsearch.search.aggregations.bucket.sampler.SamplerAggregationBuilder; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import java.io.IOException; import java.util.Arrays; @@ -88,7 +89,7 @@ public void testSignificance() throws IOException { IndexSearcher searcher = new IndexSearcher(reader); // Search "odd" which should have no duplication - Sampler sampler = searchAndReduce(searcher, new TermQuery(new Term("text", "odd")), aggBuilder, textFieldType); + InternalSampler sampler = searchAndReduce(searcher, new TermQuery(new Term("text", "odd")), aggBuilder, textFieldType); SignificantTerms terms = sampler.getAggregations().get("sig_text"); assertNull(terms.getBucketByKey("even")); @@ -109,6 +110,7 @@ public void testSignificance() throws IOException { assertNotNull(terms.getBucketByKey("even")); + assertTrue(AggregationInspectionHelper.hasValue(sampler)); } } } @@ -142,8 +144,9 @@ public void testFieldAlias() throws IOException { SamplerAggregationBuilder samplerAgg = sampler("sampler").subAggregation(agg); SamplerAggregationBuilder aliasSamplerAgg = sampler("sampler").subAggregation(aliasAgg); - Sampler sampler = searchAndReduce(searcher, new TermQuery(new Term("text", "odd")), samplerAgg, textFieldType); - Sampler aliasSampler = searchAndReduce(searcher, new TermQuery(new Term("text", "odd")), aliasSamplerAgg, textFieldType); + InternalSampler sampler = searchAndReduce(searcher, new TermQuery(new Term("text", "odd")), samplerAgg, textFieldType); + InternalSampler aliasSampler = searchAndReduce(searcher, new TermQuery(new Term("text", "odd")), + aliasSamplerAgg, textFieldType); SignificantTerms terms = sampler.getAggregations().get("sig_text"); SignificantTerms aliasTerms = aliasSampler.getAggregations().get("sig_text"); @@ -157,6 +160,9 @@ public void testFieldAlias() throws IOException { aliasTerms = aliasSampler.getAggregations().get("sig_text"); assertFalse(terms.getBuckets().isEmpty()); assertEquals(terms, aliasTerms); + + assertTrue(AggregationInspectionHelper.hasValue(sampler)); + assertTrue(AggregationInspectionHelper.hasValue(aliasSampler)); } } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java index d8948c1061369..8acf78d301af5 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java +++ 
b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java @@ -73,6 +73,7 @@ import org.elasticsearch.search.aggregations.metrics.InternalTopHits; import org.elasticsearch.search.aggregations.metrics.TopHitsAggregationBuilder; import org.elasticsearch.search.aggregations.pipeline.BucketScriptPipelineAggregationBuilder; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.ScoreSortBuilder; @@ -199,6 +200,7 @@ public void testSimple() throws Exception { assertEquals(1L, result.getBuckets().get(3).getDocCount()); assertEquals("d", result.getBuckets().get(4).getKeyAsString()); assertEquals(1L, result.getBuckets().get(4).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue((InternalTerms)result)); } } } @@ -278,6 +280,7 @@ public void testStringIncludeExclude() throws Exception { assertEquals(1L, result.getBuckets().get(8).getDocCount()); assertEquals("val009", result.getBuckets().get(9).getKeyAsString()); assertEquals(1L, result.getBuckets().get(9).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue((InternalTerms)result)); MappedFieldType fieldType2 = new KeywordFieldMapper.KeywordFieldType(); fieldType2.setName("sv_field"); @@ -304,6 +307,7 @@ public void testStringIncludeExclude() throws Exception { assertEquals(1L, result.getBuckets().get(3).getDocCount()); assertEquals("val009", result.getBuckets().get(4).getKeyAsString()); assertEquals(1L, result.getBuckets().get(4).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue((InternalTerms)result)); aggregationBuilder = new TermsAggregationBuilder("_name", ValueType.STRING) .executionHint(executionHint) @@ -333,6 +337,7 @@ public void testStringIncludeExclude() throws Exception { assertEquals(1L, result.getBuckets().get(6).getDocCount()); assertEquals("val009", result.getBuckets().get(7).getKeyAsString()); assertEquals(1L, result.getBuckets().get(7).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue((InternalTerms)result)); aggregationBuilder = new TermsAggregationBuilder("_name", ValueType.STRING) .executionHint(executionHint) @@ -349,6 +354,7 @@ public void testStringIncludeExclude() throws Exception { assertEquals(1L, result.getBuckets().get(0).getDocCount()); assertEquals("val011", result.getBuckets().get(1).getKeyAsString()); assertEquals(1L, result.getBuckets().get(1).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue((InternalTerms)result)); aggregationBuilder = new TermsAggregationBuilder("_name", ValueType.STRING) .executionHint(executionHint) @@ -365,6 +371,7 @@ public void testStringIncludeExclude() throws Exception { assertEquals(1L, result.getBuckets().get(0).getDocCount()); assertEquals("val010", result.getBuckets().get(1).getKeyAsString()); assertEquals(1L, result.getBuckets().get(1).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue((InternalTerms)result)); aggregationBuilder = new TermsAggregationBuilder("_name", ValueType.STRING) .executionHint(executionHint) @@ -382,6 +389,7 @@ public void testStringIncludeExclude() throws Exception { assertEquals(1L, result.getBuckets().get(0).getDocCount()); assertEquals("val010", result.getBuckets().get(1).getKeyAsString()); assertEquals(1L, result.getBuckets().get(1).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue((InternalTerms)result)); } } } @@ -436,6 +444,7 @@ 
public void testNumericIncludeExclude() throws Exception { assertEquals(1L, result.getBuckets().get(0).getDocCount()); assertEquals(5L, result.getBuckets().get(1).getKey()); assertEquals(1L, result.getBuckets().get(1).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue((InternalTerms)result)); aggregationBuilder = new TermsAggregationBuilder("_name", ValueType.LONG) .executionHint(executionHint) @@ -456,6 +465,7 @@ public void testNumericIncludeExclude() throws Exception { assertEquals(1L, result.getBuckets().get(2).getDocCount()); assertEquals(4L, result.getBuckets().get(3).getKey()); assertEquals(1L, result.getBuckets().get(3).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue((InternalTerms)result)); fieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.DOUBLE); fieldType.setName("double_field"); @@ -475,6 +485,7 @@ public void testNumericIncludeExclude() throws Exception { assertEquals(1L, result.getBuckets().get(0).getDocCount()); assertEquals(5.0, result.getBuckets().get(1).getKey()); assertEquals(1L, result.getBuckets().get(1).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue((InternalTerms)result)); aggregationBuilder = new TermsAggregationBuilder("_name", ValueType.DOUBLE) .executionHint(executionHint) @@ -495,6 +506,7 @@ public void testNumericIncludeExclude() throws Exception { assertEquals(1L, result.getBuckets().get(2).getDocCount()); assertEquals(4.0, result.getBuckets().get(3).getKey()); assertEquals(1L, result.getBuckets().get(3).getDocCount()); + assertTrue(AggregationInspectionHelper.hasValue((InternalTerms)result)); } } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgAggregatorTests.java index b83acfcba80ec..3e86571ae45e9 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/AvgAggregatorTests.java @@ -35,9 +35,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; -import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.AvgAggregator; -import org.elasticsearch.search.aggregations.metrics.InternalAvg; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import java.io.IOException; import java.util.Arrays; @@ -52,6 +50,7 @@ public void testNoDocs() throws IOException { // Intentionally not writing any docs }, avg -> { assertEquals(Double.NaN, avg.getValue(), 0); + assertFalse(AggregationInspectionHelper.hasValue(avg)); }); } @@ -61,6 +60,7 @@ public void testNoMatchingField() throws IOException { iw.addDocument(singleton(new SortedNumericDocValuesField("wrong_number", 3))); }, avg -> { assertEquals(Double.NaN, avg.getValue(), 0); + assertFalse(AggregationInspectionHelper.hasValue(avg)); }); } @@ -71,6 +71,7 @@ public void testSomeMatchesSortedNumericDocValues() throws IOException { iw.addDocument(singleton(new SortedNumericDocValuesField("number", 3))); }, avg -> { assertEquals(4, avg.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); }); } @@ -81,6 +82,7 @@ public void testSomeMatchesNumericDocValues() throws IOException { iw.addDocument(singleton(new NumericDocValuesField("number", 3))); }, avg -> { 
assertEquals(4, avg.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); }); } @@ -91,6 +93,7 @@ public void testQueryFiltering() throws IOException { iw.addDocument(Arrays.asList(new IntPoint("number", 3), new SortedNumericDocValuesField("number", 3))); }, avg -> { assertEquals(2.5, avg.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); }); } @@ -101,6 +104,7 @@ public void testQueryFiltersAll() throws IOException { iw.addDocument(Arrays.asList(new IntPoint("number", 3), new SortedNumericDocValuesField("number", 7))); }, avg -> { assertEquals(Double.NaN, avg.getValue(), 0); + assertFalse(AggregationInspectionHelper.hasValue(avg)); }); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorTests.java index a2789a9ef1648..4f5a8bb1ea484 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregatorTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.elasticsearch.search.aggregations.support.ValueType; import java.io.IOException; @@ -48,6 +49,7 @@ public void testNoDocs() throws IOException { // Intentionally not writing any docs }, card -> { assertEquals(0.0, card.getValue(), 0); + assertFalse(AggregationInspectionHelper.hasValue(card)); }); } @@ -57,6 +59,7 @@ public void testNoMatchingField() throws IOException { iw.addDocument(singleton(new SortedNumericDocValuesField("wrong_number", 1))); }, card -> { assertEquals(0.0, card.getValue(), 0); + assertFalse(AggregationInspectionHelper.hasValue(card)); }); } @@ -66,6 +69,7 @@ public void testSomeMatchesSortedNumericDocValues() throws IOException { iw.addDocument(singleton(new SortedNumericDocValuesField("number", 1))); }, card -> { assertEquals(2, card.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(card)); }); } @@ -75,6 +79,7 @@ public void testSomeMatchesNumericDocValues() throws IOException { iw.addDocument(singleton(new NumericDocValuesField("number", 1))); }, card -> { assertEquals(2, card.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(card)); }); } @@ -86,6 +91,7 @@ public void testQueryFiltering() throws IOException { new SortedNumericDocValuesField("number", 1))); }, card -> { assertEquals(1, card.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(card)); }); } @@ -97,6 +103,7 @@ public void testQueryFiltersAll() throws IOException { new SortedNumericDocValuesField("number", 1))); }, card -> { assertEquals(0.0, card.getValue(), 0); + assertFalse(AggregationInspectionHelper.hasValue(card)); }); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorTests.java index e65d1269520bc..ca26ba1b20672 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregatorTests.java @@ -32,6 +32,7 @@ import 
org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import java.io.IOException; import java.util.function.Consumer; @@ -55,6 +56,7 @@ public void testEmpty() throws IOException { assertEquals(Double.NaN, stats.getVariance(), 0); assertEquals(Double.NaN, stats.getStdDeviation(), 0); assertEquals(0d, stats.getSumOfSquares(), 0); + assertFalse(AggregationInspectionHelper.hasValue(stats)); } ); } @@ -92,6 +94,7 @@ public void testRandomDoubles() throws IOException { stats.getStdDeviationBound(ExtendedStats.Bounds.LOWER), TOLERANCE); assertEquals(expected.stdDevBound(ExtendedStats.Bounds.UPPER, stats.getSigma()), stats.getStdDeviationBound(ExtendedStats.Bounds.UPPER), TOLERANCE); + assertTrue(AggregationInspectionHelper.hasValue(stats)); } ); } @@ -128,6 +131,7 @@ public void testRandomLongs() throws IOException { stats.getStdDeviationBound(ExtendedStats.Bounds.LOWER), TOLERANCE); assertEquals(expected.stdDevBound(ExtendedStats.Bounds.UPPER, stats.getSigma()), stats.getStdDeviationBound(ExtendedStats.Bounds.UPPER), TOLERANCE); + assertTrue(AggregationInspectionHelper.hasValue(stats)); } ); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregatorTests.java index b171e7436eee4..562d29416dcd8 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregatorTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.index.mapper.GeoPointFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.elasticsearch.test.geo.RandomGeoGenerator; import static org.elasticsearch.search.aggregations.metrics.InternalGeoBoundsTests.GEOHASH_TOLERANCE; @@ -55,6 +56,7 @@ public void testEmpty() throws Exception { assertTrue(Double.isInfinite(bounds.posRight)); assertTrue(Double.isInfinite(bounds.negLeft)); assertTrue(Double.isInfinite(bounds.negRight)); + assertFalse(AggregationInspectionHelper.hasValue(bounds)); } } } @@ -112,6 +114,7 @@ public void testRandom() throws Exception { assertThat(bounds.posRight, closeTo(posRight, GEOHASH_TOLERANCE)); assertThat(bounds.negRight, closeTo(negRight, GEOHASH_TOLERANCE)); assertThat(bounds.negLeft, closeTo(negLeft, GEOHASH_TOLERANCE)); + assertTrue(AggregationInspectionHelper.hasValue(bounds)); } } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregatorTests.java index 3865070741258..303ed65f44856 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregatorTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.search.aggregations.AggregatorTestCase; import org.elasticsearch.search.aggregations.metrics.GeoCentroidAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.InternalGeoCentroid; +import 
org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.elasticsearch.test.geo.RandomGeoGenerator; import java.io.IOException; @@ -52,6 +53,7 @@ public void testEmpty() throws Exception { IndexSearcher searcher = new IndexSearcher(reader); InternalGeoCentroid result = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); assertNull(result.centroid()); + assertFalse(AggregationInspectionHelper.hasValue(result)); } } } @@ -79,6 +81,7 @@ public void testUnmapped() throws Exception { fieldType.setName("field"); result = search(searcher, new MatchAllDocsQuery(), aggBuilder, fieldType); assertNull(result.centroid()); + assertFalse(AggregationInspectionHelper.hasValue(result)); } } } @@ -149,6 +152,7 @@ private void assertCentroid(RandomIndexWriter w, GeoPoint expectedCentroid) thro assertNotNull(centroid); assertEquals(expectedCentroid.getLat(), centroid.getLat(), GEOHASH_TOLERANCE); assertEquals(expectedCentroid.getLon(), centroid.getLon(), GEOHASH_TOLERANCE); + assertTrue(AggregationInspectionHelper.hasValue(result)); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregatorTests.java index 52bd6a37e6f6f..9d9c74f283b45 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentileRanksAggregatorTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.search.aggregations.metrics.PercentileRanks; import org.elasticsearch.search.aggregations.metrics.PercentileRanksAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.PercentilesMethod; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.hamcrest.Matchers; import java.io.IOException; @@ -55,6 +56,7 @@ public void testEmpty() throws IOException { Percentile rank = ranks.iterator().next(); assertEquals(Double.NaN, rank.getPercent(), 0d); assertEquals(0.5, rank.getValue(), 0d); + assertFalse(AggregationInspectionHelper.hasValue((InternalHDRPercentileRanks)ranks)); } } @@ -87,6 +89,7 @@ public void testSimple() throws IOException { assertEquals(12, rank.getValue(), 0d); assertThat(rank.getPercent(), Matchers.equalTo(100d)); assertFalse(rankIterator.hasNext()); + assertTrue(AggregationInspectionHelper.hasValue((InternalHDRPercentileRanks)ranks)); } } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesAggregatorTests.java index b68b68dd544ea..f08a89657c63b 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/HDRPercentilesAggregatorTests.java @@ -38,6 +38,7 @@ import org.elasticsearch.search.aggregations.metrics.InternalHDRPercentiles; import org.elasticsearch.search.aggregations.metrics.PercentilesAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.PercentilesMethod; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import java.io.IOException; import java.util.function.Consumer; @@ -52,6 +53,7 @@ public void testNoDocs() throws IOException { // Intentionally not writing any docs }, hdr -> { assertEquals(0L, 
hdr.state.getTotalCount()); + assertFalse(AggregationInspectionHelper.hasValue(hdr)); }); } @@ -61,6 +63,7 @@ public void testNoMatchingField() throws IOException { iw.addDocument(singleton(new SortedNumericDocValuesField("wrong_number", 1))); }, hdr -> { assertEquals(0L, hdr.state.getTotalCount()); + assertFalse(AggregationInspectionHelper.hasValue(hdr)); }); } @@ -77,6 +80,7 @@ public void testSomeMatchesSortedNumericDocValues() throws IOException { assertEquals(20.0d, hdr.percentile(50), approximation); assertEquals(40.0d, hdr.percentile(75), approximation); assertEquals(60.0d, hdr.percentile(99), approximation); + assertTrue(AggregationInspectionHelper.hasValue(hdr)); }); } @@ -93,6 +97,7 @@ public void testSomeMatchesNumericDocValues() throws IOException { assertEquals(20.0d, hdr.percentile(50), approximation); assertEquals(40.0d, hdr.percentile(75), approximation); assertEquals(60.0d, hdr.percentile(99), approximation); + assertTrue(AggregationInspectionHelper.hasValue(hdr)); }); } @@ -107,10 +112,12 @@ public void testQueryFiltering() throws IOException { testCase(LongPoint.newRangeQuery("row", 0, 2), docs, hdr -> { assertEquals(2L, hdr.state.getTotalCount()); assertEquals(10.0d, hdr.percentile(randomDoubleBetween(1, 50, true)), 0.05d); + assertTrue(AggregationInspectionHelper.hasValue(hdr)); }); testCase(LongPoint.newRangeQuery("row", 5, 10), docs, hdr -> { assertEquals(0L, hdr.state.getTotalCount()); + assertFalse(AggregationInspectionHelper.hasValue(hdr)); }); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxAggregatorTests.java index 013f951cc3f2c..d962178661272 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MaxAggregatorTests.java @@ -49,6 +49,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import java.io.IOException; import java.util.ArrayList; @@ -69,6 +70,7 @@ public void testNoDocs() throws IOException { // Intentionally not writing any docs }, max -> { assertEquals(Double.NEGATIVE_INFINITY, max.getValue(), 0); + assertFalse(AggregationInspectionHelper.hasValue(max)); }); } @@ -78,6 +80,7 @@ public void testNoMatchingField() throws IOException { iw.addDocument(singleton(new SortedNumericDocValuesField("wrong_number", 1))); }, max -> { assertEquals(Double.NEGATIVE_INFINITY, max.getValue(), 0); + assertFalse(AggregationInspectionHelper.hasValue(max)); }); } @@ -87,6 +90,7 @@ public void testSomeMatchesSortedNumericDocValues() throws IOException { iw.addDocument(singleton(new SortedNumericDocValuesField("number", 1))); }, max -> { assertEquals(7, max.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(max)); }); } @@ -96,6 +100,7 @@ public void testSomeMatchesNumericDocValues() throws IOException { iw.addDocument(singleton(new NumericDocValuesField("number", 1))); }, max -> { assertEquals(7, max.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(max)); }); } @@ -105,6 +110,7 @@ public void testQueryFiltering() throws IOException { iw.addDocument(Arrays.asList(new IntPoint("number", 1), new SortedNumericDocValuesField("number", 1))); }, max -> { assertEquals(1, max.getValue(), 0); + 
assertTrue(AggregationInspectionHelper.hasValue(max)); }); } @@ -114,6 +120,7 @@ public void testQueryFiltersAll() throws IOException { iw.addDocument(Arrays.asList(new IntPoint("number", 1), new SortedNumericDocValuesField("number", 1))); }, max -> { assertEquals(Double.NEGATIVE_INFINITY, max.getValue(), 0); + assertFalse(AggregationInspectionHelper.hasValue(max)); }); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorTests.java index d47d9006b7f38..55cf9b16e1688 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregatorTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.hamcrest.Description; import org.hamcrest.TypeSafeMatcher; @@ -71,7 +72,10 @@ private static CheckedConsumer {}, agg -> assertThat(agg.getMedianAbsoluteDeviation(), equalTo(Double.NaN))); + testCase(new MatchAllDocsQuery(), writer -> {}, agg -> { + assertThat(agg.getMedianAbsoluteDeviation(), equalTo(Double.NaN)); + assertFalse(AggregationInspectionHelper.hasValue(agg)); + }); } public void testNoMatchingField() throws IOException { @@ -81,7 +85,10 @@ public void testNoMatchingField() throws IOException { writer.addDocument(singleton(new SortedNumericDocValuesField("wrong_number", 1))); writer.addDocument(singleton(new SortedNumericDocValuesField("wrong_number", 2))); }, - agg -> assertThat(agg.getMedianAbsoluteDeviation(), equalTo(Double.NaN)) + agg -> { + assertThat(agg.getMedianAbsoluteDeviation(), equalTo(Double.NaN)); + assertFalse(AggregationInspectionHelper.hasValue(agg)); + } ); } @@ -94,7 +101,10 @@ public void testSomeMatchesSortedNumericDocValues() throws IOException { sample.add(point); return singleton(new SortedNumericDocValuesField("number", point)); }), - agg -> assertThat(agg.getMedianAbsoluteDeviation(), closeToRelative(calculateMAD(sample))) + agg -> { + assertThat(agg.getMedianAbsoluteDeviation(), closeToRelative(calculateMAD(sample))); + assertTrue(AggregationInspectionHelper.hasValue(agg)); + } ); } @@ -107,7 +117,10 @@ public void testSomeMatchesNumericDocValues() throws IOException { sample.add(point); return singleton(new NumericDocValuesField("number", point)); }), - agg -> assertThat(agg.getMedianAbsoluteDeviation(), closeToRelative(calculateMAD(sample))) + agg -> { + assertThat(agg.getMedianAbsoluteDeviation(), closeToRelative(calculateMAD(sample))); + assertTrue(AggregationInspectionHelper.hasValue(agg)); + } ); } @@ -123,7 +136,10 @@ public void testQueryFiltering() throws IOException { writer.addDocument(Arrays.asList(new IntPoint("number", point), new SortedNumericDocValuesField("number", point))); } }, - agg -> assertThat(agg.getMedianAbsoluteDeviation(), closeToRelative(calculateMAD(filteredSample))) + agg -> { + assertThat(agg.getMedianAbsoluteDeviation(), closeToRelative(calculateMAD(filteredSample))); + assertTrue(AggregationInspectionHelper.hasValue(agg)); + } ); } @@ -134,7 +150,10 @@ public void testQueryFiltersAll() throws IOException { writer.addDocument(Arrays.asList(new IntPoint("number", 1), 
new SortedNumericDocValuesField("number", 1))); writer.addDocument(Arrays.asList(new IntPoint("number", 2), new SortedNumericDocValuesField("number", 2))); }, - agg -> assertThat(agg.getMedianAbsoluteDeviation(), equalTo(Double.NaN)) + agg -> { + assertThat(agg.getMedianAbsoluteDeviation(), equalTo(Double.NaN)); + assertFalse(AggregationInspectionHelper.hasValue(agg)); + } ); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MinAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MinAggregatorTests.java index ad897a2ef3264..cfe3c86034f85 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MinAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/MinAggregatorTests.java @@ -50,6 +50,7 @@ import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.elasticsearch.search.aggregations.support.FieldContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; @@ -116,6 +117,7 @@ public void testMinAggregator_numericDv() throws Exception { aggregator.postCollection(); InternalMin result = (InternalMin) aggregator.buildAggregation(0L); assertEquals(-1.0, result.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(result)); indexReader.close(); directory.close(); @@ -157,6 +159,7 @@ public void testMinAggregator_sortedNumericDv() throws Exception { aggregator.postCollection(); InternalMin result = (InternalMin) aggregator.buildAggregation(0L); assertEquals(-1.0, result.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(result)); indexReader.close(); directory.close(); @@ -189,6 +192,7 @@ public void testMinAggregator_noValue() throws Exception { aggregator.postCollection(); InternalMin result = (InternalMin) aggregator.buildAggregation(0L); assertEquals(Double.POSITIVE_INFINITY, result.getValue(), 0); + assertFalse(AggregationInspectionHelper.hasValue(result)); indexReader.close(); directory.close(); @@ -212,6 +216,7 @@ public void testMinAggregator_noDocs() throws Exception { aggregator.postCollection(); InternalMin result = (InternalMin) aggregator.buildAggregation(0L); assertEquals(Double.POSITIVE_INFINITY, result.getValue(), 0); + assertFalse(AggregationInspectionHelper.hasValue(result)); indexReader.close(); directory.close(); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsAggregatorTests.java index 52a45f9c017d1..28b1514545506 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/StatsAggregatorTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import java.io.IOException; import java.util.function.Consumer; @@ -51,6 +52,7 @@ public void testEmpty() throws IOException { assertEquals(Float.NaN, stats.getAvg(), 0); 
assertEquals(Double.POSITIVE_INFINITY, stats.getMin(), 0); assertEquals(Double.NEGATIVE_INFINITY, stats.getMax(), 0); + assertFalse(AggregationInspectionHelper.hasValue(stats)); } ); } @@ -81,6 +83,7 @@ public void testRandomDoubles() throws IOException { assertEquals(expected.min, stats.getMin(), 0); assertEquals(expected.max, stats.getMax(), 0); assertEquals(expected.sum / expected.count, stats.getAvg(), TOLERANCE); + assertTrue(AggregationInspectionHelper.hasValue(stats)); } ); } @@ -110,6 +113,7 @@ public void testRandomLongs() throws IOException { assertEquals(expected.min, stats.getMin(), 0); assertEquals(expected.max, stats.getMax(), 0); assertEquals(expected.sum / expected.count, stats.getAvg(), TOLERANCE); + assertTrue(AggregationInspectionHelper.hasValue(stats)); } ); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/SumAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/SumAggregatorTests.java index eb57bc9a5115c..ff76aa4d0edef 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/SumAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/SumAggregatorTests.java @@ -39,6 +39,7 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import java.io.IOException; import java.util.Arrays; @@ -53,14 +54,20 @@ public class SumAggregatorTests extends AggregatorTestCase { public void testNoDocs() throws IOException { testCase(new MatchAllDocsQuery(), iw -> { // Intentionally not writing any docs - }, count -> assertEquals(0L, count.getValue(), 0d)); + }, count -> { + assertEquals(0L, count.getValue(), 0d); + assertFalse(AggregationInspectionHelper.hasValue(count)); + }); } public void testNoMatchingField() throws IOException { testCase(new MatchAllDocsQuery(), iw -> { iw.addDocument(singleton(new NumericDocValuesField("wrong_number", 7))); iw.addDocument(singleton(new NumericDocValuesField("wrong_number", 1))); - }, count -> assertEquals(0L, count.getValue(), 0d)); + }, count -> { + assertEquals(0L, count.getValue(), 0d); + assertFalse(AggregationInspectionHelper.hasValue(count)); + }); } public void testNumericDocValues() throws IOException { @@ -81,7 +88,10 @@ public void testNumericDocValues() throws IOException { iw.addDocument(singleton(new NumericDocValuesField(FIELD_NAME, 2))); iw.addDocument(singleton(new NumericDocValuesField(FIELD_NAME, 1))); iw.addDocument(singleton(new NumericDocValuesField(FIELD_NAME, 2))); - }, count -> assertEquals(24L, count.getValue(), 0d)); + }, count -> { + assertEquals(24L, count.getValue(), 0d); + assertTrue(AggregationInspectionHelper.hasValue(count)); + }); } public void testSortedNumericDocValues() throws IOException { @@ -91,7 +101,10 @@ public void testSortedNumericDocValues() throws IOException { iw.addDocument(Arrays.asList(new SortedNumericDocValuesField(FIELD_NAME, 3), new SortedNumericDocValuesField(FIELD_NAME, 4))); iw.addDocument(singleton(new SortedNumericDocValuesField(FIELD_NAME, 1))); - }, count -> assertEquals(15L, count.getValue(), 0d)); + }, count -> { + assertEquals(15L, count.getValue(), 0d); + assertTrue(AggregationInspectionHelper.hasValue(count)); + }); } public void testQueryFiltering() throws IOException { @@ -101,14 +114,20 @@ public void testQueryFiltering() throws IOException { 
iw.addDocument(Arrays.asList(new StringField("match", "yes", Field.Store.NO), new NumericDocValuesField(FIELD_NAME, 3))); iw.addDocument(Arrays.asList(new StringField("match", "no", Field.Store.NO), new NumericDocValuesField(FIELD_NAME, 4))); iw.addDocument(Arrays.asList(new StringField("match", "yes", Field.Store.NO), new NumericDocValuesField(FIELD_NAME, 5))); - }, count -> assertEquals(9L, count.getValue(), 0d)); + }, count -> { + assertEquals(9L, count.getValue(), 0d); + assertTrue(AggregationInspectionHelper.hasValue(count)); + }); } public void testStringField() throws IOException { IllegalStateException e = expectThrows(IllegalStateException.class, () -> { testCase(new MatchAllDocsQuery(), iw -> { iw.addDocument(singleton(new SortedDocValuesField(FIELD_NAME, new BytesRef("1")))); - }, count -> assertEquals(0L, count.getValue(), 0d)); + }, count -> { + assertEquals(0L, count.getValue(), 0d); + assertFalse(AggregationInspectionHelper.hasValue(count)); + }); }); assertEquals("unexpected docvalues type SORTED for field 'field' (expected one of [SORTED_NUMERIC, NUMERIC]). " + "Re-index with correct docvalues type.", e.getMessage()); @@ -159,13 +178,13 @@ private void verifySummationOfDoubles(double[] values, double expected, double d private void testCase(Query query, CheckedConsumer indexer, - Consumer verify) throws IOException { + Consumer verify) throws IOException { testCase(query, indexer, verify, NumberFieldMapper.NumberType.LONG); } private void testCase(Query query, CheckedConsumer indexer, - Consumer verify, + Consumer verify, NumberFieldMapper.NumberType fieldNumberType) throws IOException { try (Directory directory = newDirectory()) { try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { @@ -187,7 +206,7 @@ private void testCase(Query query, indexSearcher.search(query, aggregator); aggregator.postCollection(); - verify.accept((Sum) aggregator.buildAggregation(0L)); + verify.accept((InternalSum) aggregator.buildAggregation(0L)); } } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregatorTests.java index 363ba14198390..2541583e94580 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentileRanksAggregatorTests.java @@ -35,6 +35,7 @@ import org.elasticsearch.search.aggregations.metrics.PercentileRanks; import org.elasticsearch.search.aggregations.metrics.PercentileRanksAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.PercentilesMethod; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.hamcrest.Matchers; import java.io.IOException; @@ -55,6 +56,7 @@ public void testEmpty() throws IOException { Percentile rank = ranks.iterator().next(); assertEquals(Double.NaN, rank.getPercent(), 0d); assertEquals(0.5, rank.getValue(), 0d); + assertFalse(AggregationInspectionHelper.hasValue(((InternalTDigestPercentileRanks)ranks))); } } @@ -91,6 +93,7 @@ public void testSimple() throws IOException { // https://github.com/elastic/elasticsearch/issues/14851 // assertThat(rank.getPercent(), Matchers.equalTo(100d)); assertFalse(rankIterator.hasNext()); + assertTrue(AggregationInspectionHelper.hasValue(((InternalTDigestPercentileRanks)ranks))); } } } diff --git 
a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesAggregatorTests.java index 8a4f399cb2525..0b1692fa6133c 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestPercentilesAggregatorTests.java @@ -38,6 +38,7 @@ import org.elasticsearch.search.aggregations.metrics.PercentilesAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.PercentilesMethod; import org.elasticsearch.search.aggregations.metrics.TDigestPercentilesAggregator; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import java.io.IOException; import java.util.function.Consumer; @@ -52,6 +53,7 @@ public void testNoDocs() throws IOException { // Intentionally not writing any docs }, tdigest -> { assertEquals(0L, tdigest.state.size()); + assertFalse(AggregationInspectionHelper.hasValue(tdigest)); }); } @@ -61,6 +63,7 @@ public void testNoMatchingField() throws IOException { iw.addDocument(singleton(new SortedNumericDocValuesField("wrong_number", 1))); }, tdigest -> { assertEquals(0L, tdigest.state.size()); + assertFalse(AggregationInspectionHelper.hasValue(tdigest)); }); } @@ -82,6 +85,7 @@ public void testSomeMatchesSortedNumericDocValues() throws IOException { assertEquals("2.0", tdigest.percentileAsString(50)); assertEquals(1.0d, tdigest.percentile(22), 0.0d); assertEquals("1.0", tdigest.percentileAsString(22)); + assertTrue(AggregationInspectionHelper.hasValue(tdigest)); }); } @@ -107,6 +111,7 @@ public void testSomeMatchesNumericDocValues() throws IOException { assertEquals("1.0", tdigest.percentileAsString(25)); assertEquals(0.0d, tdigest.percentile(1), 0.0d); assertEquals("0.0", tdigest.percentileAsString(1)); + assertTrue(AggregationInspectionHelper.hasValue(tdigest)); }); } @@ -127,11 +132,13 @@ public void testQueryFiltering() throws IOException { assertEquals(2.0d, tdigest.percentile(100), 0.0d); assertEquals(1.0d, tdigest.percentile(50), 0.0d); assertEquals(0.5d, tdigest.percentile(25), 0.0d); + assertTrue(AggregationInspectionHelper.hasValue(tdigest)); }); testCase(LongPoint.newRangeQuery("row", 100, 110), docs, tdigest -> { assertEquals(0L, tdigest.state.size()); assertEquals(0L, tdigest.state.centroidCount()); + assertFalse(AggregationInspectionHelper.hasValue(tdigest)); }); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregatorTests.java index 921f29c915f51..585cd7f9ff434 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregatorTests.java @@ -50,6 +50,7 @@ import org.elasticsearch.search.aggregations.AggregatorTestCase; import org.elasticsearch.search.aggregations.bucket.terms.Terms; import org.elasticsearch.search.aggregations.metrics.TopHits; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.elasticsearch.search.sort.SortOrder; import java.io.IOException; @@ -74,12 +75,14 @@ public void testTopLevel() throws Exception { assertEquals("type", searchHits.getAt(1).getType()); assertEquals("1", searchHits.getAt(2).getId()); assertEquals("type", 
searchHits.getAt(2).getType()); + assertTrue(AggregationInspectionHelper.hasValue(((InternalTopHits)result))); } public void testNoResults() throws Exception { TopHits result = (TopHits) testCase(new MatchNoDocsQuery(), topHits("_name").sort("string", SortOrder.DESC)); SearchHits searchHits = ((TopHits) result).getHits(); assertEquals(0L, searchHits.getTotalHits().value); + assertFalse(AggregationInspectionHelper.hasValue(((InternalTopHits)result))); } /** @@ -106,22 +109,26 @@ public void testInsideTerms() throws Exception { assertEquals(2L, searchHits.getTotalHits().value); assertEquals("2", searchHits.getAt(0).getId()); assertEquals("1", searchHits.getAt(1).getId()); + assertTrue(AggregationInspectionHelper.hasValue(((InternalTopHits) terms.getBucketByKey("a").getAggregations().get("top")))); // The "b" bucket searchHits = ((TopHits) terms.getBucketByKey("b").getAggregations().get("top")).getHits(); assertEquals(2L, searchHits.getTotalHits().value); assertEquals("3", searchHits.getAt(0).getId()); assertEquals("1", searchHits.getAt(1).getId()); + assertTrue(AggregationInspectionHelper.hasValue(((InternalTopHits) terms.getBucketByKey("b").getAggregations().get("top")))); // The "c" bucket searchHits = ((TopHits) terms.getBucketByKey("c").getAggregations().get("top")).getHits(); assertEquals(1L, searchHits.getTotalHits().value); assertEquals("2", searchHits.getAt(0).getId()); + assertTrue(AggregationInspectionHelper.hasValue(((InternalTopHits) terms.getBucketByKey("c").getAggregations().get("top")))); // The "d" bucket searchHits = ((TopHits) terms.getBucketByKey("d").getAggregations().get("top")).getHits(); assertEquals(1L, searchHits.getTotalHits().value); assertEquals("3", searchHits.getAt(0).getId()); + assertTrue(AggregationInspectionHelper.hasValue(((InternalTopHits) terms.getBucketByKey("d").getAggregations().get("top")))); } private static final MappedFieldType STRING_FIELD_TYPE = new KeywordFieldMapper.KeywordFieldType(); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregatorTests.java index f9118e30a6efd..ea14d9ec671b2 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregatorTests.java @@ -35,15 +35,13 @@ import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.index.mapper.BooleanFieldMapper; import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.mapper.GeoPointFieldMapper; import org.elasticsearch.index.mapper.IpFieldMapper; import org.elasticsearch.index.mapper.KeywordFieldMapper; -import org.elasticsearch.index.mapper.GeoPointFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.search.aggregations.AggregatorTestCase; -import org.elasticsearch.search.aggregations.metrics.ValueCount; -import org.elasticsearch.search.aggregations.metrics.ValueCountAggregationBuilder; -import org.elasticsearch.search.aggregations.metrics.ValueCountAggregator; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.elasticsearch.search.aggregations.support.ValueType; import java.io.IOException; @@ -60,7 +58,10 @@ public void testNoDocs() throws IOException { for (ValueType valueType : ValueType.values()) { testCase(new 
MatchAllDocsQuery(), valueType, iw -> { // Intentionally not writing any docs - }, count -> assertEquals(0L, count.getValue())); + }, count -> { + assertEquals(0L, count.getValue()); + assertFalse(AggregationInspectionHelper.hasValue(count)); + }); } } @@ -68,7 +69,10 @@ public void testNoMatchingField() throws IOException { testCase(new MatchAllDocsQuery(), ValueType.LONG, iw -> { iw.addDocument(singleton(new SortedNumericDocValuesField("wrong_number", 7))); iw.addDocument(singleton(new SortedNumericDocValuesField("wrong_number", 1))); - }, count -> assertEquals(0L, count.getValue())); + }, count -> { + assertEquals(0L, count.getValue()); + assertFalse(AggregationInspectionHelper.hasValue(count)); + }); } public void testSomeMatchesSortedNumericDocValues() throws IOException { @@ -76,14 +80,20 @@ public void testSomeMatchesSortedNumericDocValues() throws IOException { iw.addDocument(singleton(new SortedNumericDocValuesField("wrong_number", 7))); iw.addDocument(singleton(new SortedNumericDocValuesField(FIELD_NAME, 7))); iw.addDocument(singleton(new SortedNumericDocValuesField(FIELD_NAME, 1))); - }, count -> assertEquals(2L, count.getValue())); + }, count -> { + assertEquals(2L, count.getValue()); + assertTrue(AggregationInspectionHelper.hasValue(count)); + }); } public void testSomeMatchesNumericDocValues() throws IOException { testCase(new DocValuesFieldExistsQuery(FIELD_NAME), ValueType.NUMBER, iw -> { iw.addDocument(singleton(new NumericDocValuesField(FIELD_NAME, 7))); iw.addDocument(singleton(new NumericDocValuesField(FIELD_NAME, 1))); - }, count -> assertEquals(2L, count.getValue())); + }, count -> { + assertEquals(2L, count.getValue()); + assertTrue(AggregationInspectionHelper.hasValue(count)); + }); } public void testQueryFiltering() throws IOException { @@ -93,20 +103,26 @@ public void testQueryFiltering() throws IOException { iw.addDocument(Arrays.asList(new IntPoint("level", 3), new SortedDocValuesField(FIELD_NAME, new BytesRef("foo")))); iw.addDocument(Arrays.asList(new IntPoint("level", 5), new SortedDocValuesField(FIELD_NAME, new BytesRef("baz")))); iw.addDocument(Arrays.asList(new IntPoint("level", 7), new SortedDocValuesField(FIELD_NAME, new BytesRef("baz")))); - }, count -> assertEquals(4L, count.getValue())); + }, count -> { + assertEquals(4L, count.getValue()); + assertTrue(AggregationInspectionHelper.hasValue(count)); + }); } public void testQueryFiltersAll() throws IOException { testCase(IntPoint.newRangeQuery("level", -1, 0), ValueType.STRING, iw -> { iw.addDocument(Arrays.asList(new IntPoint("level", 3), new SortedDocValuesField(FIELD_NAME, new BytesRef("foo")))); iw.addDocument(Arrays.asList(new IntPoint("level", 5), new SortedDocValuesField(FIELD_NAME, new BytesRef("baz")))); - }, count -> assertEquals(0L, count.getValue())); + }, count -> { + assertEquals(0L, count.getValue()); + assertFalse(AggregationInspectionHelper.hasValue(count)); + }); } private void testCase(Query query, ValueType valueType, CheckedConsumer indexer, - Consumer verify) throws IOException { + Consumer verify) throws IOException { try (Directory directory = newDirectory()) { try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { @@ -127,7 +143,7 @@ private void testCase(Query query, aggregator.preCollection(); indexSearcher.search(query, aggregator); aggregator.postCollection(); - verify.accept((ValueCount) aggregator.buildAggregation(0L)); + verify.accept((InternalValueCount) aggregator.buildAggregation(0L)); } } } diff --git 
a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregatorTests.java index 3836f0cc2ae14..d0027208b104b 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregatorTests.java @@ -39,6 +39,7 @@ import org.elasticsearch.search.aggregations.metrics.InternalWeightedAvg; import org.elasticsearch.search.aggregations.metrics.WeightedAvgAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.WeightedAvgAggregator; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.elasticsearch.search.aggregations.support.MultiValuesSourceFieldConfig; import org.joda.time.DateTimeZone; @@ -63,6 +64,7 @@ public void testNoDocs() throws IOException { // Intentionally not writing any docs }, avg -> { assertEquals(Double.NaN, avg.getValue(), 0); + assertFalse(AggregationInspectionHelper.hasValue(avg)); }); } @@ -77,6 +79,7 @@ public void testNoMatchingField() throws IOException { iw.addDocument(singleton(new SortedNumericDocValuesField("wrong_number", 3))); }, avg -> { assertEquals(Double.NaN, avg.getValue(), 0); + assertFalse(AggregationInspectionHelper.hasValue(avg)); }); } @@ -95,6 +98,7 @@ public void testSomeMatchesSortedNumericDocValuesNoWeight() throws IOException { new SortedNumericDocValuesField("weight_field", 1))); }, avg -> { assertEquals(4, avg.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); }); } @@ -115,6 +119,7 @@ public void testSomeMatchesSortedNumericDocValuesWeights() throws IOException { }, avg -> { // (7*2 + 2*3 + 3*3) / (2+3+3) == 3.625 assertEquals(3.625, avg.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); }); } @@ -133,6 +138,7 @@ public void testSomeMatchesNumericDocValues() throws IOException { new SortedNumericDocValuesField("weight_field", 1))); }, avg -> { assertEquals(4, avg.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); }); } @@ -151,6 +157,7 @@ public void testQueryFiltering() throws IOException { new SortedNumericDocValuesField("weight_field", 1))); }, avg -> { assertEquals(2.5, avg.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); }); } @@ -170,6 +177,7 @@ public void testQueryFilteringWeights() throws IOException { }, avg -> { double value = (2.0*3.0 + 3.0*4.0) / (3.0+4.0); assertEquals(value, avg.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); }); } @@ -185,6 +193,7 @@ public void testQueryFiltersAll() throws IOException { iw.addDocument(Arrays.asList(new IntPoint("value_field", 3), new SortedNumericDocValuesField("value_field", 7))); }, avg -> { assertEquals(Double.NaN, avg.getValue(), 0); + assertFalse(AggregationInspectionHelper.hasValue(avg)); }); } @@ -203,6 +212,7 @@ public void testQueryFiltersAllWeights() throws IOException { new SortedNumericDocValuesField("weight_field", 4))); }, avg -> { assertEquals(Double.NaN, avg.getValue(), 0); + assertFalse(AggregationInspectionHelper.hasValue(avg)); }); } @@ -222,6 +232,7 @@ public void testValueSetMissing() throws IOException { }, avg -> { double value = (2.0*2.0 + 2.0*3.0 + 2.0*4.0) / (2.0+3.0+4.0); assertEquals(value, avg.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); }); } @@ -241,6 +252,7 @@ public void testWeightSetMissing() throws IOException { 
}, avg -> { double value = (2.0*2.0 + 3.0*2.0 + 4.0*2.0) / (2.0+2.0+2.0); assertEquals(value, avg.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); }); } @@ -311,6 +323,7 @@ public void testMultiValues() throws IOException { }, avg -> { double value = (((2.0+3.0)/2.0) + ((3.0+4.0)/2.0) + ((4.0+5.0)/2.0)) / (1.0+1.0+1.0); assertEquals(value, avg.getValue(), 0); + assertTrue(AggregationInspectionHelper.hasValue(avg)); }); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java index 37882bcdf5e44..b1eec2b0f48ca 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java @@ -47,6 +47,7 @@ import org.elasticsearch.search.aggregations.metrics.InternalAvg; import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; +import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import java.io.IOException; import java.util.Arrays; @@ -93,6 +94,7 @@ public void testSimple() throws IOException { for (Histogram.Bucket bucket : buckets) { sum += ((InternalAvg) (bucket.getAggregations().get("the_avg"))).value(); assertThat(((InternalSimpleValue) (bucket.getAggregations().get("cusum"))).value(), equalTo(sum)); + assertTrue(AggregationInspectionHelper.hasValue(((InternalAvg) (bucket.getAggregations().get("the_avg"))))); } }); } @@ -116,14 +118,37 @@ public void testDerivative() throws IOException { for (int i = 0; i < buckets.size(); i++) { if (i == 0) { assertThat(((InternalSimpleValue)(buckets.get(i).getAggregations().get("cusum"))).value(), equalTo(0.0)); + assertTrue(AggregationInspectionHelper.hasValue(((InternalSimpleValue) (buckets.get(i) + .getAggregations().get("cusum"))))); } else { sum += 1.0; assertThat(((InternalSimpleValue)(buckets.get(i).getAggregations().get("cusum"))).value(), equalTo(sum)); + assertTrue(AggregationInspectionHelper.hasValue(((InternalSimpleValue) (buckets.get(i) + .getAggregations().get("cusum"))))); } } }); } + public void testCount() throws IOException { + Query query = new MatchAllDocsQuery(); + + DateHistogramAggregationBuilder aggBuilder = new DateHistogramAggregationBuilder("histo"); + aggBuilder.dateHistogramInterval(DateHistogramInterval.DAY).field(HISTO_FIELD); + aggBuilder.subAggregation(new CumulativeSumPipelineAggregationBuilder("cusum", "_count")); + + executeTestCase(query, aggBuilder, histogram -> { + assertEquals(10, ((Histogram)histogram).getBuckets().size()); + List buckets = ((Histogram)histogram).getBuckets(); + double sum = 1.0; + for (Histogram.Bucket bucket : buckets) { + assertThat(((InternalSimpleValue) (bucket.getAggregations().get("cusum"))).value(), equalTo(sum)); + assertTrue(AggregationInspectionHelper.hasValue(((InternalSimpleValue) (bucket.getAggregations().get("cusum"))))); + sum += 1.0; + } + }); + } + public void testDocCount() throws IOException { Query query = new MatchAllDocsQuery();
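
The aggregator-test diffs above all make the same two-part change: results are typed as their concrete Internal* classes (InternalNested, InternalSampler, InternalTerms, ...) so that the new AggregationInspectionHelper.hasValue(...) assertion can be applied to them. As a rough, self-contained sketch of what such a check amounts to — the stub types below are hypothetical stand-ins, not the real Elasticsearch classes, and the real helper dispatches over many more aggregation types:

```java
// Hypothetical illustration of the hasValue(...) pattern used in these tests.
// The idea, as the diffs use it: an aggregation that collected no documents
// "has no value", even though getters still return sentinel results (NaN,
// +/-Infinity, zero counts).
public final class InspectionHelperSketch {

    /** Stand-in for InternalAvg: sum plus the number of collected values. */
    static final class InternalAvgStub {
        final double sum;
        final long count;
        InternalAvgStub(double sum, long count) { this.sum = sum; this.count = count; }
    }

    /** Stand-in for a single-bucket agg such as InternalNested. */
    static final class InternalNestedStub {
        final long docCount;
        InternalNestedStub(long docCount) { this.docCount = docCount; }
    }

    static boolean hasValue(InternalAvgStub avg) {
        return avg.count > 0;          // avg.getValue() is NaN when nothing was collected
    }

    static boolean hasValue(InternalNestedStub nested) {
        return nested.docCount > 0;    // no nested docs matched -> no value
    }

    public static void main(String[] args) {
        // Mirrors the assertTrue/assertFalse pairs added throughout the diffs:
        System.out.println(hasValue(new InternalAvgStub(12.0, 3)));  // true: docs matched
        System.out.println(hasValue(new InternalAvgStub(0.0, 0)));   // false: query filtered all docs
        System.out.println(hasValue(new InternalNestedStub(4L)));    // true, cf. testResetRootDocId
    }
}
```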
diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TestMultiValueAggregation.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TestMultiValueAggregation.java index f8996966fa1f6..cfa429e91e83d 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TestMultiValueAggregation.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/execution/search/extractor/TestMultiValueAggregation.java @@ -60,4 +60,4 @@ protected int doHashCode() { protected boolean doEquals(Object obj) { throw new UnsupportedOperationException(); } -} \ No newline at end of file +}

From eb43ab6d6081ca4f239a0e4c2672658b1f9d9a26 Mon Sep 17 00:00:00 2001
From: Tim Brooks
Date: Tue, 22 Jan 2019 10:57:37 -0700
Subject: [PATCH 17/39] Implement leader rate limiting for file restore (#37677)

This is related to #35975. This commit implements rate limiting on the leader side using the CombinedRateLimiter.
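
The heart of the change: every chunk read on the leader now passes through a shared rate limiter, and whatever time the limiter spends pausing is accumulated so that getThrottleTime() can report it. A minimal sketch of that scheme follows. SimpleRateLimiter is a hypothetical stand-in for Elasticsearch's CombinedRateLimiter — the only contract assumed from the diff below is that maybePause(bytes) blocks as needed and returns the time spent paused — and AtomicLong stands in for the CounterMetric behind getThrottleTime():

```java
import java.util.concurrent.atomic.AtomicLong;

// Hypothetical sketch of the leader-side throttling wired in below; not the
// real CcrRestoreSourceService.
public final class LeaderRateLimitSketch {

    static final class SimpleRateLimiter {
        private final double maxBytesPerSec;
        private long nextFreeNanos = System.nanoTime();

        SimpleRateLimiter(double maxBytesPerSec) {
            this.maxBytesPerSec = maxBytesPerSec;
        }

        /** Pause so the average rate stays under budget; return nanos paused. */
        synchronized long maybePause(int bytes) {
            long costNanos = (long) (bytes / maxBytesPerSec * 1_000_000_000L);
            long now = System.nanoTime();
            long pause = Math.max(0L, nextFreeNanos - now);
            nextFreeNanos = Math.max(now, nextFreeNanos) + costNanos;
            if (pause > 0) {
                try {
                    Thread.sleep(pause / 1_000_000L, (int) (pause % 1_000_000L));
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }
            return pause;
        }
    }

    private final SimpleRateLimiter rateLimiter = new SimpleRateLimiter(10 * 1024); // "10K", as in the IT below
    private final AtomicLong throttleTime = new AtomicLong();

    /** Mirrors SessionReader#readFileBytes: pause, record the pause, then read. */
    int readFileBytes(int chunkLength) {
        long paused = rateLimiter.maybePause(chunkLength); // rateLimiter.maybePause(reference.length()) in the diff
        throttleTime.addAndGet(paused);                    // throttleListener.accept(throttleTime)
        return chunkLength;                                // stand-in for the actual file read
    }

    /** Mirrors CcrRestoreSourceService#getThrottleTime, asserted on in the IT. */
    long getThrottleTime() {
        return throttleTime.get();
    }

    public static void main(String[] args) {
        LeaderRateLimitSketch leader = new LeaderRateLimitSketch();
        for (int i = 0; i < 5; i++) {
            leader.readFileBytes(8 * 1024); // 8 KiB chunks against a 10 KiB/s budget
        }
        System.out.println("nanos spent throttled: " + leader.getThrottleTime());
    }
}
```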
---
 .../repository/CcrRestoreSourceService.java | 19 +++++++++++++--
 .../xpack/ccr/CcrRepositoryIT.java | 24 ++++++++++++++++---
 2 files changed, 38 insertions(+), 5 deletions(-)

diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceService.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceService.java index 1c7f9f95adbbe..a72b2f21d71df 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceService.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceService.java @@ -16,8 +16,10 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.CombinedRateLimiter; import org.elasticsearch.common.util.concurrent.AbstractRefCounted; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.KeyedLock; @@ -42,6 +44,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CopyOnWriteArrayList; import java.util.function.Consumer; +import java.util.function.LongConsumer; public class CcrRestoreSourceService extends AbstractLifecycleComponent implements IndexEventListener { @@ -52,6 +55,7 @@ public class CcrRestoreSourceService extends AbstractLifecycleComponent implemen private final CopyOnWriteArrayList<Consumer<String>> closeSessionListeners = new CopyOnWriteArrayList<>(); private final ThreadPool threadPool; private final CcrSettings ccrSettings; + private final CounterMetric throttleTime = new CounterMetric(); public CcrRestoreSourceService(ThreadPool threadPool, CcrSettings ccrSettings) { this.threadPool = threadPool; @@ -136,7 +140,7 @@ public synchronized SessionReader getSessionReader(String sessionUUID) { throw new IllegalArgumentException("session [" + sessionUUID + "] not found"); } restore.idle = false; - return new SessionReader(restore); + return new SessionReader(restore, ccrSettings, throttleTime::inc); } private void internalCloseSession(String sessionUUID, boolean throwIfSessionMissing) { @@ -182,6 +186,10 @@ private void maybeTimeout(String sessionUUID) { } } + public long getThrottleTime() { + return this.throttleTime.count(); + } + private static class RestoreSession extends AbstractRefCounted { private final String sessionUUID; @@ -254,9 +262,13 @@ protected void closeInternal() { public static class SessionReader implements Closeable { private final RestoreSession restoreSession; + private final CcrSettings ccrSettings; + private final LongConsumer throttleListener; - private SessionReader(RestoreSession restoreSession) { + private SessionReader(RestoreSession restoreSession, CcrSettings ccrSettings, LongConsumer throttleListener) { this.restoreSession = restoreSession; + this.ccrSettings = ccrSettings; + this.throttleListener = throttleListener; restoreSession.incRef(); } @@ -270,6 +282,9 @@ private SessionReader(RestoreSession restoreSession) { * @throws IOException if the read fails */ public long readFileBytes(String fileName, BytesReference reference) throws IOException { + CombinedRateLimiter rateLimiter = ccrSettings.getRateLimiter(); + long throttleTime = rateLimiter.maybePause(reference.length()); + throttleListener.accept(throttleTime); return restoreSession.readFileBytes(fileName, reference); } diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java index 47cc1c528fa5c..0a3669734dc6b 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRepositoryIT.java @@ -238,9 +238,15 @@ public void testDocsAreRecovered() throws Exception { } public void testRateLimitingIsEmployed() throws Exception { + boolean followerRateLimiting = randomBoolean(); + ClusterUpdateSettingsRequest settingsRequest = new ClusterUpdateSettingsRequest(); settingsRequest.persistentSettings(Settings.builder().put(CcrSettings.RECOVERY_MAX_BYTES_PER_SECOND.getKey(), "10K")); - assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet()); + if (followerRateLimiting) { + assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet()); + } else { + assertAcked(leaderClient().admin().cluster().updateSettings(settingsRequest).actionGet()); + } String leaderClusterRepoName = CcrRepository.NAME_PREFIX + "leader_cluster"; String leaderIndex = "index1"; @@ -256,11 +262,15 @@ public void testRateLimitingIsEmployed() throws Exception { final ClusterService clusterService = getFollowerCluster().getCurrentMasterNodeInstance(ClusterService.class); List<CcrRepository> repositories = new ArrayList<>(); + List<CcrRestoreSourceService> restoreSources = new ArrayList<>(); for (RepositoriesService repositoriesService : getFollowerCluster().getDataOrMasterNodeInstances(RepositoriesService.class)) { Repository repository = repositoriesService.repository(leaderClusterRepoName); repositories.add((CcrRepository) repository); } + for (CcrRestoreSourceService restoreSource : getLeaderCluster().getDataOrMasterNodeInstances(CcrRestoreSourceService.class)) { + restoreSources.add(restoreSource); + } logger.info("--> indexing some data"); for (int i = 0; i < 100; i++) { @@ -282,12 +292,20 @@ public void testRateLimitingIsEmployed() throws Exception { restoreService.restoreSnapshot(restoreRequest, waitForRestore(clusterService, future)); future.actionGet(); - assertTrue(repositories.stream().anyMatch(cr -> cr.getRestoreThrottleTimeInNanos() > 0)); + if (followerRateLimiting) { + assertTrue(repositories.stream().anyMatch(cr -> cr.getRestoreThrottleTimeInNanos() > 0)); + } else { + assertTrue(restoreSources.stream().anyMatch(cr -> cr.getThrottleTime() > 0)); + } settingsRequest = new ClusterUpdateSettingsRequest(); ByteSizeValue defaultValue = CcrSettings.RECOVERY_MAX_BYTES_PER_SECOND.getDefault(Settings.EMPTY);
settingsRequest.persistentSettings(Settings.builder().put(CcrSettings.RECOVERY_MAX_BYTES_PER_SECOND.getKey(), defaultValue)); - assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet()); + if (followerRateLimiting) { + assertAcked(followerClient().admin().cluster().updateSettings(settingsRequest).actionGet()); + } else { + assertAcked(leaderClient().admin().cluster().updateSettings(settingsRequest).actionGet()); + } } public void testFollowerMappingIsUpdated() throws IOException { From 940f6ba4c1192e1663168ebce4a42da6f0352d87 Mon Sep 17 00:00:00 2001 From: Brandon Kobel Date: Tue, 22 Jan 2019 12:09:08 -0800 Subject: [PATCH 18/39] Remove kibana_user and kibana_dashboard_only_user index privileges (#37441) * Remove kibana_user and kibana_dashboard_only_user .kibana* index privileges * Removing unused imports --- .../authz/store/ReservedRolesStore.java | 9 ++--- .../authz/store/ReservedRolesStoreTests.java | 27 --------------- .../integration/KibanaUserRoleIntegTests.java | 33 ------------------- 3 files changed, 2 insertions(+), 67 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java index f15e078f9a941..dfd276f4ee9f0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStore.java @@ -46,9 +46,7 @@ private static Map initializeReservedRoles() { .put("superuser", SUPERUSER_ROLE_DESCRIPTOR) .put("transport_client", new RoleDescriptor("transport_client", new String[] { "transport_client" }, null, null, MetadataUtils.DEFAULT_RESERVED_METADATA)) - .put("kibana_user", new RoleDescriptor("kibana_user", null, new RoleDescriptor.IndicesPrivileges[] { - RoleDescriptor.IndicesPrivileges.builder().indices(".kibana*").privileges("manage", "read", "index", "delete") - .build() }, new RoleDescriptor.ApplicationResourcePrivileges[] { + .put("kibana_user", new RoleDescriptor("kibana_user", null, null, new RoleDescriptor.ApplicationResourcePrivileges[] { RoleDescriptor.ApplicationResourcePrivileges.builder() .application("kibana-.kibana").resources("*").privileges("all").build() }, null, null, @@ -97,10 +95,7 @@ private static Map initializeReservedRoles() { .put("kibana_dashboard_only_user", new RoleDescriptor( "kibana_dashboard_only_user", null, - new RoleDescriptor.IndicesPrivileges[] { - RoleDescriptor.IndicesPrivileges.builder() - .indices(".kibana*").privileges("read", "view_index_metadata").build() - }, + null, new RoleDescriptor.ApplicationResourcePrivileges[] { RoleDescriptor.ApplicationResourcePrivileges.builder() .application("kibana-.kibana").resources("*").privileges("read").build() }, diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java index 7ad03846c1d7e..5a567ad13ff80 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/store/ReservedRolesStoreTests.java @@ -312,20 +312,6 @@ public void testKibanaUserRole() { 
assertThat(kibanaUserRole.indices().allowedIndicesMatcher("indices:foo") .test(randomAlphaOfLengthBetween(8, 24)), is(false)); - Arrays.asList(".kibana", ".kibana-devnull").forEach((index) -> { - logger.info("index name [{}]", index); - assertThat(kibanaUserRole.indices().allowedIndicesMatcher("indices:foo").test(index), is(false)); - assertThat(kibanaUserRole.indices().allowedIndicesMatcher("indices:bar").test(index), is(false)); - - assertThat(kibanaUserRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(index), is(true)); - assertThat(kibanaUserRole.indices().allowedIndicesMatcher(DeleteIndexAction.NAME).test(index), is(true)); - assertThat(kibanaUserRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(index), is(true)); - assertThat(kibanaUserRole.indices().allowedIndicesMatcher(IndexAction.NAME).test(index), is(true)); - assertThat(kibanaUserRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(index), is(true)); - assertThat(kibanaUserRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(index), is(true)); - assertThat(kibanaUserRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(index), is(true)); - }); - final String randomApplication = "kibana-" + randomAlphaOfLengthBetween(8, 24); assertThat(kibanaUserRole.application().grants(new ApplicationPrivilege(randomApplication, "app-random", "all"), "*"), is(false)); @@ -569,19 +555,6 @@ public void testKibanaDashboardOnlyUserRole() { assertThat(dashboardsOnlyUserRole.runAs().check(randomAlphaOfLengthBetween(1, 12)), is(false)); - final String index = ".kibana"; - assertThat(dashboardsOnlyUserRole.indices().allowedIndicesMatcher("indices:foo").test(index), is(false)); - assertThat(dashboardsOnlyUserRole.indices().allowedIndicesMatcher("indices:bar").test(index), is(false)); - - assertThat(dashboardsOnlyUserRole.indices().allowedIndicesMatcher(DeleteAction.NAME).test(index), is(false)); - assertThat(dashboardsOnlyUserRole.indices().allowedIndicesMatcher(DeleteIndexAction.NAME).test(index), is(false)); - assertThat(dashboardsOnlyUserRole.indices().allowedIndicesMatcher(CreateIndexAction.NAME).test(index), is(false)); - assertThat(dashboardsOnlyUserRole.indices().allowedIndicesMatcher(UpdateSettingsAction.NAME).test(index), is(false)); - - assertThat(dashboardsOnlyUserRole.indices().allowedIndicesMatcher(GetIndexAction.NAME).test(index), is(true)); - assertThat(dashboardsOnlyUserRole.indices().allowedIndicesMatcher(SearchAction.NAME).test(index), is(true)); - assertThat(dashboardsOnlyUserRole.indices().allowedIndicesMatcher(MultiSearchAction.NAME).test(index), is(true)); - final String randomApplication = "kibana-" + randomAlphaOfLengthBetween(8, 24); assertThat(dashboardsOnlyUserRole.application().grants(new ApplicationPrivilege(randomApplication, "app-random", "all"), "*"), is(false)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/KibanaUserRoleIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/KibanaUserRoleIntegTests.java index 597314b2a6196..19533542686de 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/KibanaUserRoleIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/KibanaUserRoleIntegTests.java @@ -5,15 +5,11 @@ */ package org.elasticsearch.integration; -import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import 
org.elasticsearch.action.admin.indices.get.GetIndexResponse; import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse; import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetaData; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse; -import org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.MappingMetaData; @@ -23,11 +19,9 @@ import org.elasticsearch.test.NativeRealmIntegTestCase; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; -import java.util.Locale; import java.util.Map; import static java.util.Collections.singletonMap; -import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -149,33 +143,6 @@ public void testGetIndex() throws Exception { assertThat(response.getIndices(), arrayContaining(index)); } - public void testCreateIndexDeleteInKibanaIndex() throws Exception { - final String index = randomBoolean()? ".kibana" : ".kibana-" + randomAlphaOfLengthBetween(1, 10).toLowerCase(Locale.ENGLISH); - - if (randomBoolean()) { - CreateIndexResponse createIndexResponse = client().filterWithHeader(singletonMap("Authorization", - UsernamePasswordToken.basicAuthHeaderValue("kibana_user", USERS_PASSWD))) - .admin().indices().prepareCreate(index).get(); - assertThat(createIndexResponse.isAcknowledged(), is(true)); - } - - IndexResponse response = client() - .filterWithHeader(singletonMap("Authorization", UsernamePasswordToken.basicAuthHeaderValue("kibana_user", USERS_PASSWD))) - .prepareIndex() - .setIndex(index) - .setType("dashboard") - .setSource("foo", "bar") - .setRefreshPolicy(IMMEDIATE) - .get(); - assertEquals(DocWriteResponse.Result.CREATED, response.getResult()); - - DeleteResponse deleteResponse = client() - .filterWithHeader(singletonMap("Authorization", UsernamePasswordToken.basicAuthHeaderValue("kibana_user", USERS_PASSWD))) - .prepareDelete(index, "dashboard", response.getId()) - .get(); - assertEquals(DocWriteResponse.Result.DELETED, deleteResponse.getResult()); - } - public void testGetMappings() throws Exception { final String index = "logstash-20-12-2015"; final String type = "event"; From 992bfd2064a4074b8eb278b6354ae68e9fa87fda Mon Sep 17 00:00:00 2001 From: Julie Tibshirani Date: Tue, 22 Jan 2019 12:13:01 -0800 Subject: [PATCH 19/39] Remove additional references to 'type' from the _bulk documentation. (#37722) --- docs/reference/docs/bulk.asciidoc | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/docs/reference/docs/bulk.asciidoc b/docs/reference/docs/bulk.asciidoc index 0aae2365d965e..16e93ac196c8d 100644 --- a/docs/reference/docs/bulk.asciidoc +++ b/docs/reference/docs/bulk.asciidoc @@ -57,7 +57,7 @@ newlines. 
Example: [source,js] -------------------------------------------------- $ cat requests -{ "index" : { "_index" : "test", "_type" : "_doc", "_id" : "1" } } +{ "index" : { "_index" : "test", "_id" : "1" } } { "field1" : "value1" } $ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo {"took":7, "errors": false, "items":[{"index":{"_index":"test","_type":"_doc","_id":"1","_version":1,"result":"created","forced_refresh":false}}]} @@ -72,12 +72,12 @@ example of a correct sequence of bulk commands: [source,js] -------------------------------------------------- POST _bulk -{ "index" : { "_index" : "test", "_type" : "_doc", "_id" : "1" } } +{ "index" : { "_index" : "test", "_id" : "1" } } { "field1" : "value1" } -{ "delete" : { "_index" : "test", "_type" : "_doc", "_id" : "2" } } -{ "create" : { "_index" : "test", "_type" : "_doc", "_id" : "3" } } +{ "delete" : { "_index" : "test", "_id" : "2" } } +{ "create" : { "_index" : "test", "_id" : "3" } } { "field1" : "value3" } -{ "update" : {"_id" : "1", "_type" : "_doc", "_index" : "test"} } +{ "update" : {"_id" : "1", "_index" : "test"} } { "doc" : {"field2" : "value2"} } -------------------------------------------------- // CONSOLE @@ -265,15 +265,15 @@ the options. Example with update actions: [source,js] -------------------------------------------------- POST _bulk -{ "update" : {"_id" : "1", "_type" : "_doc", "_index" : "index1", "retry_on_conflict" : 3} } +{ "update" : {"_id" : "1", "_index" : "index1", "retry_on_conflict" : 3} } { "doc" : {"field" : "value"} } -{ "update" : { "_id" : "0", "_type" : "_doc", "_index" : "index1", "retry_on_conflict" : 3} } +{ "update" : { "_id" : "0", "_index" : "index1", "retry_on_conflict" : 3} } { "script" : { "source": "ctx._source.counter += params.param1", "lang" : "painless", "params" : {"param1" : 1}}, "upsert" : {"counter" : 1}} -{ "update" : {"_id" : "2", "_type" : "_doc", "_index" : "index1", "retry_on_conflict" : 3} } +{ "update" : {"_id" : "2", "_index" : "index1", "retry_on_conflict" : 3} } { "doc" : {"field" : "value"}, "doc_as_upsert" : true } -{ "update" : {"_id" : "3", "_type" : "_doc", "_index" : "index1", "_source" : true} } +{ "update" : {"_id" : "3", "_index" : "index1", "_source" : true} } { "doc" : {"field" : "value"} } -{ "update" : {"_id" : "4", "_type" : "_doc", "_index" : "index1"} } +{ "update" : {"_id" : "4", "_index" : "index1"} } { "doc" : {"field" : "value"}, "_source": true} -------------------------------------------------- // CONSOLE From c28479819efa201ab141a8c5c5ccd856c5a00116 Mon Sep 17 00:00:00 2001 From: olcbean Date: Tue, 22 Jan 2019 21:36:42 +0100 Subject: [PATCH 20/39] Fix a typo in a warning message in TestFixturesPlugin (#37631) --- .../elasticsearch/gradle/testfixtures/TestFixturesPlugin.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java index c13bcc02cbe89..32a50fb4b0750 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java @@ -96,7 +96,7 @@ public void apply(Project project) { if (dockerComposeSupported(project) == false) { project.getLogger().warn( "Tests for {} require docker-compose at /usr/local/bin/docker-compose or /usr/bin/docker-compose " + - "but none could not be found so these 
will be skipped", project.getPath() + "but none could be found so these will be skipped", project.getPath() ); tasks.withType(getTaskClass("com.carrotsearch.gradle.junit4.RandomizedTestingTask"), task -> task.setEnabled(false) From 942fc13af5985eda8723f9c072194c7e4c0dc5fc Mon Sep 17 00:00:00 2001 From: Mayya Sharipova Date: Tue, 22 Jan 2019 16:49:03 -0500 Subject: [PATCH 21/39] Use plain text instead of latexmath As latexmath is not rendered, using plain text instead Closes #37718 --- docs/reference/query-dsl/script-score-query.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/query-dsl/script-score-query.asciidoc b/docs/reference/query-dsl/script-score-query.asciidoc index dfd06a04523c3..2189319a1fc86 100644 --- a/docs/reference/query-dsl/script-score-query.asciidoc +++ b/docs/reference/query-dsl/script-score-query.asciidoc @@ -53,7 +53,7 @@ rewriting equivalent functions of your own, as these functions try to be the most efficient by using the internal mechanisms. ===== rational -latexmath:[rational(value,k) = value/(k + value)] +`rational(value,k) = value/(k + value)` [source,js] -------------------------------------------------- @@ -64,7 +64,7 @@ latexmath:[rational(value,k) = value/(k + value)] // NOTCONSOLE ===== sigmoid -latexmath:[sigmoid(value, k, a) = value^a/ (k^a + value^a)] +`sigmoid(value, k, a) = value^a/ (k^a + value^a)` [source,js] -------------------------------------------------- From e2e00cd2450058384ce4bbdf16b130824a75f81a Mon Sep 17 00:00:00 2001 From: Andrey Ershov Date: Wed, 23 Jan 2019 07:21:26 +0100 Subject: [PATCH 22/39] Fix MetaStateFormat tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It's not safe to continue writing state using MetaDataStateFormat after dirty WriteStateException occurred if it's not recovered by successful subsequent state write. We've encountered test failure of testFailRandomlyAndReadAnyState. The test breaks in the following way. There are 3 state paths. And what happens next Successful write at the beginning of the test yields 0 0 0 state files in the directories. 1st write in the loop is unsuccessful, but not dirty - 0 0 0. 2nd write in the loop is not successful and dirty (failure during fsync), however before removing new files we have 1 1 1. But now during deletion, the first deletion fails and we get - 1 0 0. 3rd write in the loop is unsuccessful, but not dirty - so we want to keep old generation, which happens to be the 1st generation, so now we have 1 x x in state folders. Now we assert that we either load 0 or 1 state from the state folders and select only 2rd and 3th folder to emulate disk failures - this results in NPE because there is nothing in these folders. Fortunately, this won’t be a problem in real life, because if there is a dirty exception, we shut down the node and make sure we perform a successful write on the node startup. 
--- .../org/elasticsearch/gateway/GatewayMetaStateTests.java | 6 ++++++ .../org/elasticsearch/gateway/MetaDataStateFormatTests.java | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java b/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java index 7a6b751de74f9..22259b919ec6f 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java @@ -412,6 +412,12 @@ public void testAtomicityWithFailures() throws IOException { } catch (WriteStateException e) { if (e.isDirty()) { possibleMetaData.add(metaData); + /* + * If dirty WriteStateException occurred, it's only safe to proceed if there is subsequent + * successful write of metadata and Manifest. We prefer to break here, not to over complicate test logic. + * See also MetaDataStateFormat#testFailRandomlyAndReadAnyState, that does not break. + */ + break; } } } diff --git a/server/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java b/server/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java index 2522452f72774..40f3bd8a01623 100644 --- a/server/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTests.java @@ -408,6 +408,12 @@ public void testFailRandomlyAndReadAnyState() throws IOException { Path[] randomPaths = randomSubsetOf(randomIntBetween(1, paths.length), paths).toArray(new Path[0]); DummyState stateOnDisk = format.loadLatestState(logger, NamedXContentRegistry.EMPTY, randomPaths); assertTrue(possibleStates.contains(stateOnDisk)); + if (possibleStates.size() > 1) { + //if there was a WriteStateException we need to override current state before we continue + newState = writeAndReadStateSuccessfully(format, paths); + possibleStates.clear(); + possibleStates.add(newState); + } } writeAndReadStateSuccessfully(format, paths); From 7c6566e14c22d80db7e91a422c5b3c0df7b10b2d Mon Sep 17 00:00:00 2001 From: Andrey Ershov Date: Wed, 23 Jan 2019 07:22:41 +0100 Subject: [PATCH 23/39] Migrate SpecificMasterNodesIT to Zen2 (#37532) 1. testSimpleOnlyMasterNodeElection - requires cluster bootstrap when the first master node is started. 2. testElectOnlyBetweenMasterNodes - requires cluster bootstrap when the first master node is started and requires adding voting exclusion before shutting down the first master node. 3. testAliasFilterValidation - requires cluster bootstrap when the first master node is started. 
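For point 2, the required sequence is visible in the test diff below; condensed, it looks like this (a sketch reusing the ESIntegTestCase helpers client() and internalCluster() from the test):

// Withdraw the current master's vote before stopping it, so that under Zen2
// the remaining master-eligible node can win the subsequent election.
client().execute(AddVotingConfigExclusionsAction.INSTANCE,
    new AddVotingConfigExclusionsRequest(new String[]{masterNodeName})).get();
internalCluster().stopCurrentMasterNode();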
--- .../cluster/SpecificMasterNodesIT.java | 27 ++++++++++++++++--- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java b/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java index 3d945b7a7bb68..aaef1e58fb50e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/SpecificMasterNodesIT.java @@ -20,6 +20,9 @@ package org.elasticsearch.cluster; import org.apache.lucene.search.join.ScoreMode; +import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsAction; +import org.elasticsearch.action.admin.cluster.configuration.AddVotingConfigExclusionsRequest; +import org.elasticsearch.cluster.coordination.ClusterBootstrapService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.discovery.MasterNotDiscoveredException; @@ -28,10 +31,12 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.test.discovery.TestZenDiscovery; import org.elasticsearch.test.junit.annotations.TestLogging; import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutionException; import static org.elasticsearch.discovery.zen.ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -45,10 +50,22 @@ public class SpecificMasterNodesIT extends ESIntegTestCase { @Override protected Settings nodeSettings(int nodeOrdinal) { return Settings.builder().put(super.nodeSettings(nodeOrdinal)) - .put(TestZenDiscovery.USE_ZEN2.getKey(), false) // does unsafe things .put(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), 1).build(); } + @Override + protected List addExtraClusterBootstrapSettings(List allNodesSettings) { + // if it's the first master in the cluster bootstrap the cluster with this node name + Settings settings = allNodesSettings.get(0); + if (internalCluster().numMasterNodes() == 0 && settings.getAsBoolean(Node.NODE_MASTER_SETTING.getKey(), false)) { + return Collections.singletonList(Settings.builder() + .put(settings) + .put(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), settings.get(Node.NODE_NAME_SETTING.getKey())) + .build()); + } + return allNodesSettings; + } + public void testSimpleOnlyMasterNodeElection() throws IOException { logger.info("--> start data node / non master node"); internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true) @@ -89,7 +106,7 @@ public void testSimpleOnlyMasterNodeElection() throws IOException { .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligibleNodeName)); } - public void testElectOnlyBetweenMasterNodes() throws IOException { + public void testElectOnlyBetweenMasterNodes() throws IOException, ExecutionException, InterruptedException { logger.info("--> start data node / non master node"); internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true) .put(Node.NODE_MASTER_SETTING.getKey(), false).put("discovery.initial_state_timeout", "1s")); @@ -119,6 +136,8 @@ public void testElectOnlyBetweenMasterNodes() throws IOException { .execute().actionGet().getState().nodes().getMasterNode().getName(), 
equalTo(masterNodeName)); logger.info("--> closing master node (1)"); + client().execute(AddVotingConfigExclusionsAction.INSTANCE, + new AddVotingConfigExclusionsRequest(new String[]{masterNodeName})).get(); internalCluster().stopCurrentMasterNode(); assertThat(internalCluster().nonMasterClient().admin().cluster().prepareState() .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligableNodeName)); @@ -126,7 +145,7 @@ public void testElectOnlyBetweenMasterNodes() throws IOException { .execute().actionGet().getState().nodes().getMasterNode().getName(), equalTo(nextMasterEligableNodeName)); } - public void testAliasFilterValidation() throws Exception { + public void testAliasFilterValidation() { logger.info("--> start master node / non data"); internalCluster().startNode(Settings.builder() .put(Node.NODE_DATA_SETTING.getKey(), false).put(Node.NODE_MASTER_SETTING.getKey(), true)); From 534ba1dd349a4baadd6ab8dc63dab0a849733804 Mon Sep 17 00:00:00 2001 From: Andrey Ershov Date: Wed, 23 Jan 2019 07:23:06 +0100 Subject: [PATCH 24/39] Remove LicenseServiceClusterNotRecoveredTests (#37528) While migrating tests from Zen1 to Zen2, we encountered this test. The test is organized as follows: 1. Start the first cluster node. 2. Start the second cluster node. 3. Check that the license is active. Interestingly, adding assertLicenseActive(true) between steps 1 and 2 also makes the test pass. assertLicenseActive retrieves XPackLicenseState from the nodes and checks that the active flag is set. It is set to true even before the cluster is initialized, so this test does not make sense. --- ...icenseServiceClusterNotRecoveredTests.java | 60 ------------------- 1 file changed, 60 deletions(-) delete mode 100644 x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterNotRecoveredTests.java diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterNotRecoveredTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterNotRecoveredTests.java deleted file mode 100644 index 69710b24bb230..0000000000000 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseServiceClusterNotRecoveredTests.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License.
- */ -package org.elasticsearch.license; - -import org.elasticsearch.analysis.common.CommonAnalysisPlugin; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.discovery.TestZenDiscovery; -import org.elasticsearch.transport.Netty4Plugin; -import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin; - -import java.util.Arrays; -import java.util.Collection; - -import static org.elasticsearch.test.ESIntegTestCase.Scope.TEST; - -@ESIntegTestCase.ClusterScope(scope = TEST, numDataNodes = 0, numClientNodes = 0, maxNumDataNodes = 0, transportClientRatio = 0, - autoMinMasterNodes = false) -public class LicenseServiceClusterNotRecoveredTests extends AbstractLicensesIntegrationTestCase { - - @Override - protected Settings nodeSettings(int nodeOrdinal) { - return nodeSettingsBuilder(nodeOrdinal).build(); - } - - @Override - protected boolean addMockHttpTransport() { - return false; - } - - private Settings.Builder nodeSettingsBuilder(int nodeOrdinal) { - return Settings.builder() - .put(super.nodeSettings(nodeOrdinal)) - .put("node.data", true) - .put(TestZenDiscovery.USE_ZEN2.getKey(), false) // this test is just weird - .put("resource.reload.interval.high", "500ms"); // for license mode file watcher - } - - @Override - protected Collection<Class<? extends Plugin>> nodePlugins() { - return Arrays.asList(LocalStateCompositeXPackPlugin.class, CommonAnalysisPlugin.class, Netty4Plugin.class); - } - - @Override - protected Collection<Class<? extends Plugin>> transportClientPlugins() { - return nodePlugins(); - } - - public void testClusterNotRecovered() throws Exception { - logger.info("--> start one master out of two [recovery state]"); - internalCluster().startNode(nodeSettingsBuilder(0).put("discovery.zen.minimum_master_nodes", 2).put("node.master", true)); - logger.info("--> start second master out of two [recovered state]"); - internalCluster().startNode(nodeSettingsBuilder(1).put("discovery.zen.minimum_master_nodes", 2).put("node.master", true)); - assertLicenseActive(true); - } -} From 52ba407931093ba86538596639eaf35b2a858d01 Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Wed, 23 Jan 2019 09:01:58 +0100 Subject: [PATCH 25/39] Expose sequence number and primary terms in search responses (#37639) Users may require the sequence number and primary term to perform optimistic concurrency control operations. Currently, you can get the sequence number via the `docvalues_fields` API, but the primary term is not accessible because it is maintained by the `SeqNoFieldMapper` and the infrastructure can't find it. This commit adds a dedicated sub fetch phase that returns both numbers; it is connected to a new `seq_no_primary_term` parameter.
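On the Java side the same flag lives on SearchSourceBuilder, and each SearchHit exposes the two values via getters (both used by the tests below). A condensed sketch of the API this change adds, where `hit` stands for any SearchHit taken from the response:

SearchSourceBuilder source = new SearchSourceBuilder()
    .query(QueryBuilders.termQuery("user", "kimchy")) // example query from the docs below
    .seqNoAndPrimaryTerm(true);                       // request _seq_no and _primary_term per hit

// Both values are needed for optimistic concurrency control:
long seqNo = hit.getSeqNo();
long primaryTerm = hit.getPrimaryTerm();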
--- .../metrics/tophits-aggregation.asciidoc | 1 + .../docs/concurrency-control.asciidoc | 2 +- docs/reference/search/request-body.asciidoc | 2 +- .../search/request/inner-hits.asciidoc | 1 + .../request/version-and-seq-no.asciidoc | 34 +++++++++ .../reference/search/request/version.asciidoc | 16 ---- .../join/query/HasChildQueryBuilderTests.java | 1 + .../elasticsearch/join/query/InnerHitsIT.java | 22 +++++- .../rest-api-spec/test/11_parent_child.yml | 55 +++++++++----- .../remote/RemoteRequestBuildersTests.java | 2 +- .../test/multi_cluster/10_basic.yml | 3 + .../resources/rest-api-spec/api/search.json | 4 + .../200_top_hits_metric.yml | 44 +++++++++-- .../test/search/110_field_collapsing.yml | 54 ++++++++++++++ .../test/search/300_sequence_numbers.yml | 74 +++++++++++++++++++ .../action/search/ExpandSearchPhase.java | 1 + .../index/query/InnerHitBuilder.java | 24 +++++- .../index/query/InnerHitContextBuilder.java | 1 + .../index/query/NestedQueryBuilder.java | 8 ++ .../rest/action/search/RestSearchAction.java | 3 + .../search/DefaultSearchContext.java | 11 +++ .../org/elasticsearch/search/SearchHit.java | 54 +++++++++++++- .../elasticsearch/search/SearchModule.java | 2 + .../elasticsearch/search/SearchService.java | 5 ++ .../metrics/TopHitsAggregationBuilder.java | 39 +++++++++- .../metrics/TopHitsAggregatorFactory.java | 7 +- .../search/builder/SearchSourceBuilder.java | 38 +++++++++- .../SeqNoPrimaryTermFetchSubPhase.java | 69 +++++++++++++++++ .../internal/FilteredSearchContext.java | 10 +++ .../search/internal/SearchContext.java | 8 +- .../search/internal/SubSearchContext.java | 11 +++ .../action/search/ExpandSearchPhaseTests.java | 4 +- .../index/query/InnerHitBuilderTests.java | 7 ++ .../index/query/NestedQueryBuilderTests.java | 16 ++-- .../elasticsearch/search/SearchHitTests.java | 5 ++ .../aggregations/metrics/TopHitsIT.java | 12 +++ .../aggregations/metrics/TopHitsTests.java | 3 + .../search/RandomSearchRequestGenerator.java | 3 + .../elasticsearch/test/TestSearchContext.java | 10 +++ 39 files changed, 603 insertions(+), 63 deletions(-) create mode 100644 docs/reference/search/request/version-and-seq-no.asciidoc delete mode 100644 docs/reference/search/request/version.asciidoc create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/search/300_sequence_numbers.yml create mode 100644 server/src/main/java/org/elasticsearch/search/fetch/subphase/SeqNoPrimaryTermFetchSubPhase.java diff --git a/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc b/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc index c3b6a9bad4cfc..1ea38fec9657b 100644 --- a/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/tophits-aggregation.asciidoc @@ -25,6 +25,7 @@ The top_hits aggregation returns regular search hits, because of this many per h * <> * <> * <> +* <> ==== Example diff --git a/docs/reference/docs/concurrency-control.asciidoc b/docs/reference/docs/concurrency-control.asciidoc index e695e6b5127c9..780a9c7cf76fc 100644 --- a/docs/reference/docs/concurrency-control.asciidoc +++ b/docs/reference/docs/concurrency-control.asciidoc @@ -87,7 +87,7 @@ returns: Note: The <> can return the `_seq_no` and `_primary_term` -for each search hit by requesting the `_seq_no` and `_primary_term` <>. +for each search hit by setting <>. The sequence number and the primary term uniquely identify a change. 
By noting down the sequence number and primary term returned, you can make sure to only change the diff --git a/docs/reference/search/request-body.asciidoc b/docs/reference/search/request-body.asciidoc index 9970c4cc6223f..dac7622aab8ed 100644 --- a/docs/reference/search/request-body.asciidoc +++ b/docs/reference/search/request-body.asciidoc @@ -213,7 +213,7 @@ include::request/preference.asciidoc[] include::request/explain.asciidoc[] -include::request/version.asciidoc[] +include::request/version-and-seq-no.asciidoc[] include::request/index-boost.asciidoc[] diff --git a/docs/reference/search/request/inner-hits.asciidoc b/docs/reference/search/request/inner-hits.asciidoc index bcd2c297e5da3..b287b1609703e 100644 --- a/docs/reference/search/request/inner-hits.asciidoc +++ b/docs/reference/search/request/inner-hits.asciidoc @@ -76,6 +76,7 @@ Inner hits also supports the following per document features: * <> * <> * <> +* <> [[nested-inner-hits]] ==== Nested inner hits diff --git a/docs/reference/search/request/version-and-seq-no.asciidoc b/docs/reference/search/request/version-and-seq-no.asciidoc new file mode 100644 index 0000000000000..2bca4c985b290 --- /dev/null +++ b/docs/reference/search/request/version-and-seq-no.asciidoc @@ -0,0 +1,34 @@ +[[search-request-seq-no-primary-term]] +=== Sequence Numbers and Primary Term + +Returns the sequence number and primary term of the last modification to each search hit. +See <> for more details. + +[source,js] +-------------------------------------------------- +GET /_search +{ + "seq_no_primary_term": true, + "query" : { + "term" : { "user" : "kimchy" } + } +} +-------------------------------------------------- +// CONSOLE + +[[search-request-version]] +=== Version + +Returns a version for each search hit. + +[source,js] +-------------------------------------------------- +GET /_search +{ + "version": true, + "query" : { + "term" : { "user" : "kimchy" } + } +} +-------------------------------------------------- +// CONSOLE diff --git a/docs/reference/search/request/version.asciidoc b/docs/reference/search/request/version.asciidoc deleted file mode 100644 index 57c6ce27feb91..0000000000000 --- a/docs/reference/search/request/version.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -[[search-request-version]] -=== Version - -Returns a version for each search hit. 
- -[source,js] --------------------------------------------------- -GET /_search -{ - "version": true, - "query" : { - "term" : { "user" : "kimchy" } - } -} --------------------------------------------------- -// CONSOLE diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java index 6e4e79d16e5a5..eea01d61386de 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/HasChildQueryBuilderTests.java @@ -252,6 +252,7 @@ public void testFromJson() throws IOException { " \"from\" : 0,\n" + " \"size\" : 100,\n" + " \"version\" : false,\n" + + " \"seq_no_primary_term\" : false,\n" + " \"explain\" : false,\n" + " \"track_scores\" : false,\n" + " \"sort\" : [ {\n" + diff --git a/modules/parent-join/src/test/java/org/elasticsearch/join/query/InnerHitsIT.java b/modules/parent-join/src/test/java/org/elasticsearch/join/query/InnerHitsIT.java index 7a8d2cd9dbc21..89929985ea594 100644 --- a/modules/parent-join/src/test/java/org/elasticsearch/join/query/InnerHitsIT.java +++ b/modules/parent-join/src/test/java/org/elasticsearch/join/query/InnerHitsIT.java @@ -56,6 +56,8 @@ import static org.elasticsearch.index.query.QueryBuilders.matchQuery; import static org.elasticsearch.index.query.QueryBuilders.nestedQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_PRIMARY_TERM; +import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; import static org.elasticsearch.join.query.JoinQueryBuilders.hasChildQuery; import static org.elasticsearch.join.query.JoinQueryBuilders.hasParentQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -66,6 +68,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasId; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -133,9 +136,10 @@ public void testSimpleParentChild() throws Exception { assertThat(innerHits.getAt(1).getId(), equalTo("c2")); assertThat(innerHits.getAt(1).getType(), equalTo("doc")); + final boolean seqNoAndTerm = randomBoolean(); response = client().prepareSearch("articles") .setQuery(hasChildQuery("comment", matchQuery("message", "elephant"), ScoreMode.None) - .innerHit(new InnerHitBuilder())) + .innerHit(new InnerHitBuilder().setSeqNoAndPrimaryTerm(seqNoAndTerm))) .get(); assertNoFailures(response); assertHitCount(response, 1); @@ -152,6 +156,22 @@ public void testSimpleParentChild() throws Exception { assertThat(innerHits.getAt(2).getId(), equalTo("c6")); assertThat(innerHits.getAt(2).getType(), equalTo("doc")); + if (seqNoAndTerm) { + assertThat(innerHits.getAt(0).getPrimaryTerm(), equalTo(1L)); + assertThat(innerHits.getAt(1).getPrimaryTerm(), equalTo(1L)); + assertThat(innerHits.getAt(2).getPrimaryTerm(), equalTo(1L)); + assertThat(innerHits.getAt(0).getSeqNo(), greaterThanOrEqualTo(0L)); + assertThat(innerHits.getAt(1).getSeqNo(), greaterThanOrEqualTo(0L)); + assertThat(innerHits.getAt(2).getSeqNo(), greaterThanOrEqualTo(0L)); + } else { + assertThat(innerHits.getAt(0).getPrimaryTerm(), equalTo(UNASSIGNED_PRIMARY_TERM)); + 
assertThat(innerHits.getAt(1).getPrimaryTerm(), equalTo(UNASSIGNED_PRIMARY_TERM)); + assertThat(innerHits.getAt(2).getPrimaryTerm(), equalTo(UNASSIGNED_PRIMARY_TERM)); + assertThat(innerHits.getAt(0).getSeqNo(), equalTo(UNASSIGNED_SEQ_NO)); + assertThat(innerHits.getAt(1).getSeqNo(), equalTo(UNASSIGNED_SEQ_NO)); + assertThat(innerHits.getAt(2).getSeqNo(), equalTo(UNASSIGNED_SEQ_NO)); + } + response = client().prepareSearch("articles") .setQuery( hasChildQuery("comment", matchQuery("message", "fox"), ScoreMode.None).innerHit( diff --git a/modules/parent-join/src/test/resources/rest-api-spec/test/11_parent_child.yml b/modules/parent-join/src/test/resources/rest-api-spec/test/11_parent_child.yml index 7273ac6a95d5e..61af4ab1acb59 100644 --- a/modules/parent-join/src/test/resources/rest-api-spec/test/11_parent_child.yml +++ b/modules/parent-join/src/test/resources/rest-api-spec/test/11_parent_child.yml @@ -11,26 +11,26 @@ setup: relations: parent: child ---- -"Parent/child inner hits": - - do: - index: - index: test - type: doc - id: 1 - body: {"foo": "bar", "join_field": {"name" : "parent"} } + - do: + index: + index: test + type: doc + id: 1 + body: {"foo": "bar", "join_field": {"name" : "parent"} } - - do: - index: - index: test - type: doc - id: 2 - routing: 1 - body: {"bar": "baz", "join_field": { "name" : "child", "parent": "1"} } + - do: + index: + index: test + type: doc + id: 2 + routing: 1 + body: {"bar": "baz", "join_field": { "name" : "child", "parent": "1"} } - - do: - indices.refresh: {} + - do: + indices.refresh: {} +--- +"Parent/child inner hits": - do: search: rest_total_hits_as_int: true @@ -41,3 +41,24 @@ setup: - match: { hits.hits.0.inner_hits.child.hits.hits.0._index: "test"} - match: { hits.hits.0.inner_hits.child.hits.hits.0._id: "2" } - is_false: hits.hits.0.inner_hits.child.hits.hits.0._nested + +--- +"Parent/child inner hits with seq no": + - skip: + version: " - 6.99.99" + reason: support was added in 7.0 + + - do: + search: + rest_total_hits_as_int: true + body: { "query" : { "has_child" : + { "type" : "child", "query" : { "match_all" : {} }, "inner_hits" : { "seq_no_primary_term": true} } + } } + - match: { hits.total: 1 } + - match: { hits.hits.0._index: "test" } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.0.inner_hits.child.hits.hits.0._index: "test"} + - match: { hits.hits.0.inner_hits.child.hits.hits.0._id: "2" } + - is_false: hits.hits.0.inner_hits.child.hits.hits.0._nested + - gte: { hits.hits.0.inner_hits.child.hits.hits.0._seq_no: 0 } + - gte: { hits.hits.0.inner_hits.child.hits.hits.0._primary_term: 1 } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java index 2f801811327b8..0f985fd37016a 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/remote/RemoteRequestBuildersTests.java @@ -179,7 +179,7 @@ public void testInitialSearchParamsMisc() { fetchVersion = randomBoolean(); searchRequest.source().version(fetchVersion); } - + Map params = initialSearch(searchRequest, query, remoteVersion).getParameters(); if (scroll == null) { diff --git a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml index 70afa9bf917e8..5acf84139bbf4 
100644 --- a/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml +++ b/qa/multi-cluster-search/src/test/resources/rest-api-spec/test/multi_cluster/10_basic.yml @@ -30,6 +30,7 @@ rest_total_hits_as_int: true index: test_index,my_remote_cluster:test_index body: + seq_no_primary_term: true aggs: cluster: terms: @@ -37,6 +38,8 @@ - match: { _shards.total: 5 } - match: { hits.total: 11 } + - gte: { hits.hits.0._seq_no: 0 } + - gte: { hits.hits.0._primary_term: 1 } - length: { aggregations.cluster.buckets: 2 } - match: { aggregations.cluster.buckets.0.key: "remote_cluster" } - match: { aggregations.cluster.buckets.0.doc_count: 6 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json index 5834ca623a99b..9ac02b1214a2f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/search.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/search.json @@ -164,6 +164,10 @@ "type" : "boolean", "description" : "Specify whether to return document version as part of a hit" }, + "seq_no_primary_term": { + "type" : "boolean", + "description" : "Specify whether to return sequence number and primary term of the last modification of each hit" + }, "request_cache": { "type" : "boolean", "description" : "Specify if request cache should be used for this request or not, defaults to index level setting" diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/200_top_hits_metric.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/200_top_hits_metric.yml index 267a9e85d1d5d..775475e01a597 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/200_top_hits_metric.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/200_top_hits_metric.yml @@ -1,8 +1,4 @@ ---- -"top_hits aggregation with nested documents": - - skip: - version: " - 6.1.99" - reason: "<= 6.1 nodes don't always include index or id in nested top hits" +setup: - do: indices.create: index: my-index @@ -54,6 +50,12 @@ ] } +--- +"top_hits aggregation with nested documents": + - skip: + version: " - 6.1.99" + reason: "<= 6.1 nodes don't always include index or id in nested top hits" + - do: search: rest_total_hits_as_int: true @@ -81,3 +83,35 @@ - match: { aggregations.to-users.users.hits.hits.2._index: my-index } - match: { aggregations.to-users.users.hits.hits.2._nested.field: users } - match: { aggregations.to-users.users.hits.hits.2._nested.offset: 1 } + + +--- +"top_hits aggregation with sequence numbers": + - skip: + version: " - 6.99.99" + reason: support was added in 7.0 + + - do: + search: + rest_total_hits_as_int: true + body: + aggs: + groups: + terms: + field: group.keyword + aggs: + users: + top_hits: + sort: "users.last.keyword" + seq_no_primary_term: true + + - match: { hits.total: 2 } + - length: { aggregations.groups.buckets.0.users.hits.hits: 2 } + - match: { aggregations.groups.buckets.0.users.hits.hits.0._id: "1" } + - match: { aggregations.groups.buckets.0.users.hits.hits.0._index: my-index } + - gte: { aggregations.groups.buckets.0.users.hits.hits.0._seq_no: 0 } + - gte: { aggregations.groups.buckets.0.users.hits.hits.0._primary_term: 1 } + - match: { aggregations.groups.buckets.0.users.hits.hits.1._id: "2" } + - match: { aggregations.groups.buckets.0.users.hits.hits.1._index: my-index } + - gte: { aggregations.groups.buckets.0.users.hits.hits.1._seq_no: 0 } + - gte: { 
aggregations.groups.buckets.0.users.hits.hits.1._primary_term: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml index 7ba1d9b62fbc7..a85abb4e6e28b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml @@ -405,3 +405,57 @@ setup: - match: { hits.hits.1.inner_hits.sub_hits.hits.total: 3} - match: { hits.hits.2.fields.group_alias: [25] } - match: { hits.hits.2.inner_hits.sub_hits.hits.total: 2} + +--- +"field collapsing, inner_hits and seq_no": + + - skip: + version: " - 6.99.0" + reason: "sequence numbers introduced in 7.0.0" + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + collapse: { field: numeric_group, inner_hits: { + name: sub_hits, seq_no_primary_term: true, size: 2, sort: [{ sort: asc }] + } } + sort: [{ sort: desc }] + + - match: { hits.total: 6 } + - length: { hits.hits: 3 } + - match: { hits.hits.0._index: test } + - match: { hits.hits.0.fields.numeric_group: [3] } + - match: { hits.hits.0.sort: [36] } + - match: { hits.hits.0._id: "6" } + - match: { hits.hits.0.inner_hits.sub_hits.hits.total: 1 } + - length: { hits.hits.0.inner_hits.sub_hits.hits.hits: 1 } + - match: { hits.hits.0.inner_hits.sub_hits.hits.hits.0._id: "6" } + - gte: { hits.hits.0.inner_hits.sub_hits.hits.hits.0._seq_no: 0 } + - gte: { hits.hits.0.inner_hits.sub_hits.hits.hits.0._primary_term: 1 } + - match: { hits.hits.1._index: test } + - match: { hits.hits.1.fields.numeric_group: [1] } + - match: { hits.hits.1.sort: [24] } + - match: { hits.hits.1._id: "3" } + - match: { hits.hits.1.inner_hits.sub_hits.hits.total: 3 } + - length: { hits.hits.1.inner_hits.sub_hits.hits.hits: 2 } + - match: { hits.hits.1.inner_hits.sub_hits.hits.hits.0._id: "2" } + - gte: { hits.hits.1.inner_hits.sub_hits.hits.hits.0._seq_no: 0 } + - gte: { hits.hits.1.inner_hits.sub_hits.hits.hits.0._primary_term: 1 } + - match: { hits.hits.1.inner_hits.sub_hits.hits.hits.1._id: "1" } + - gte: { hits.hits.1.inner_hits.sub_hits.hits.hits.1._seq_no: 0 } + - gte: { hits.hits.1.inner_hits.sub_hits.hits.hits.1._primary_term: 1 } + - match: { hits.hits.2._index: test } + - match: { hits.hits.2._type: test } + - match: { hits.hits.2.fields.numeric_group: [25] } + - match: { hits.hits.2.sort: [10] } + - match: { hits.hits.2._id: "4" } + - match: { hits.hits.2.inner_hits.sub_hits.hits.total: 2 } + - length: { hits.hits.2.inner_hits.sub_hits.hits.hits: 2 } + - match: { hits.hits.2.inner_hits.sub_hits.hits.hits.0._id: "5" } + - gte: { hits.hits.2.inner_hits.sub_hits.hits.hits.0._seq_no: 0 } + - gte: { hits.hits.2.inner_hits.sub_hits.hits.hits.0._primary_term: 1 } + - match: { hits.hits.2.inner_hits.sub_hits.hits.hits.1._id: "4" } + - gte: { hits.hits.2.inner_hits.sub_hits.hits.hits.1._seq_no: 0 } + - gte: { hits.hits.2.inner_hits.sub_hits.hits.hits.1._primary_term: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/300_sequence_numbers.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/300_sequence_numbers.yml new file mode 100644 index 0000000000000..9e838d1c58f77 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/300_sequence_numbers.yml @@ -0,0 +1,74 @@ +setup: + - do: + indices.create: + index: test_1 + + - do: + index: + index: test_1 + type: test + id: 1 + body: { foo: foo } + +## we index 
again in order to make the seq# 1 (so we can check for the field existence with is_false) + - do: + index: + index: test_1 + type: test + id: 1 + body: { foo: bar } + + - do: + indices.refresh: + index: [test_1] + +--- +"sequence numbers are returned if requested from body": + - skip: + version: " - 6.99.99" + reason: sequence numbers were added in 7.0.0 + + - do: + search: + index: _all + body: + query: + match: + foo: bar + seq_no_primary_term: true + + - match: {hits.total.value: 1} + - match: {hits.hits.0._seq_no: 1} + - gte: {hits.hits.0._primary_term: 1} + +--- +"sequence numbers are returned if requested from url": + - skip: + version: " - 6.99.99" + reason: sequence numbers were added in 7.0.0 + + - do: + search: + index: _all + body: + query: + match: + foo: bar + seq_no_primary_term: true + + - match: {hits.total.value: 1} + - match: {hits.hits.0._seq_no: 1} + - gte: {hits.hits.0._primary_term: 1} + +--- +"sequence numbers are not returned if not requested": + - do: + search: + index: _all + body: + query: + match: + foo: bar + + - is_false: hits.hits.0._seq_no + - is_false: hits.hits.0._primary_term diff --git a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java index da481b7a4a8ee..10a85b723166c 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java +++ b/server/src/main/java/org/elasticsearch/action/search/ExpandSearchPhase.java @@ -153,6 +153,7 @@ private SearchSourceBuilder buildExpandSearchSourceBuilder(InnerHitBuilder optio groupSource.explain(options.isExplain()); groupSource.trackScores(options.isTrackScores()); groupSource.version(options.isVersion()); + groupSource.seqNoAndPrimaryTerm(options.isSeqNoAndPrimaryTerm()); if (innerCollapseBuilder != null) { groupSource.collapse(innerCollapseBuilder); } diff --git a/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java b/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java index 733875075b051..f5be9650b8d5c 100644 --- a/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java @@ -68,6 +68,7 @@ public final class InnerHitBuilder implements Writeable, ToXContentObject { PARSER.declareInt(InnerHitBuilder::setSize, SearchSourceBuilder.SIZE_FIELD); PARSER.declareBoolean(InnerHitBuilder::setExplain, SearchSourceBuilder.EXPLAIN_FIELD); PARSER.declareBoolean(InnerHitBuilder::setVersion, SearchSourceBuilder.VERSION_FIELD); + PARSER.declareBoolean(InnerHitBuilder::setSeqNoAndPrimaryTerm, SearchSourceBuilder.SEQ_NO_PRIMARY_TERM_FIELD); PARSER.declareBoolean(InnerHitBuilder::setTrackScores, SearchSourceBuilder.TRACK_SCORES_FIELD); PARSER.declareStringArray(InnerHitBuilder::setStoredFieldNames, SearchSourceBuilder.STORED_FIELDS_FIELD); PARSER.declareObjectArray(InnerHitBuilder::setDocValueFields, @@ -117,7 +118,6 @@ public final class InnerHitBuilder implements Writeable, ToXContentObject { }, COLLAPSE_FIELD, ObjectParser.ValueType.OBJECT); } - private String name; private boolean ignoreUnmapped; @@ -125,6 +125,7 @@ public final class InnerHitBuilder implements Writeable, ToXContentObject { private int size = 3; private boolean explain; private boolean version; + private boolean seqNoAndPrimaryTerm; private boolean trackScores; private StoredFieldsContext storedFieldsContext; @@ -155,6 +156,11 @@ public InnerHitBuilder(StreamInput in) throws IOException { size = 
in.readVInt(); explain = in.readBoolean(); version = in.readBoolean(); + if (in.getVersion().onOrAfter(Version.V_7_0_0)){ + seqNoAndPrimaryTerm = in.readBoolean(); + } else { + seqNoAndPrimaryTerm = false; + } trackScores = in.readBoolean(); storedFieldsContext = in.readOptionalWriteable(StoredFieldsContext::new); if (in.getVersion().before(Version.V_6_4_0)) { @@ -199,6 +205,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(size); out.writeBoolean(explain); out.writeBoolean(version); + if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + out.writeBoolean(seqNoAndPrimaryTerm); + } out.writeBoolean(trackScores); out.writeOptionalWriteable(storedFieldsContext); if (out.getVersion().before(Version.V_6_4_0)) { @@ -299,6 +308,15 @@ public InnerHitBuilder setVersion(boolean version) { return this; } + public boolean isSeqNoAndPrimaryTerm() { + return seqNoAndPrimaryTerm; + } + + public InnerHitBuilder setSeqNoAndPrimaryTerm(boolean seqNoAndPrimaryTerm) { + this.seqNoAndPrimaryTerm = seqNoAndPrimaryTerm; + return this; + } + public boolean isTrackScores() { return trackScores; } @@ -436,6 +454,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(SearchSourceBuilder.FROM_FIELD.getPreferredName(), from); builder.field(SearchSourceBuilder.SIZE_FIELD.getPreferredName(), size); builder.field(SearchSourceBuilder.VERSION_FIELD.getPreferredName(), version); + builder.field(SearchSourceBuilder.SEQ_NO_PRIMARY_TERM_FIELD.getPreferredName(), seqNoAndPrimaryTerm); builder.field(SearchSourceBuilder.EXPLAIN_FIELD.getPreferredName(), explain); builder.field(SearchSourceBuilder.TRACK_SCORES_FIELD.getPreferredName(), trackScores); if (fetchSourceContext != null) { @@ -494,6 +513,7 @@ public boolean equals(Object o) { Objects.equals(size, that.size) && Objects.equals(explain, that.explain) && Objects.equals(version, that.version) && + Objects.equals(seqNoAndPrimaryTerm, that.seqNoAndPrimaryTerm) && Objects.equals(trackScores, that.trackScores) && Objects.equals(storedFieldsContext, that.storedFieldsContext) && Objects.equals(docValueFields, that.docValueFields) && @@ -506,7 +526,7 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(name, ignoreUnmapped, from, size, explain, version, trackScores, + return Objects.hash(name, ignoreUnmapped, from, size, explain, version, seqNoAndPrimaryTerm, trackScores, storedFieldsContext, docValueFields, scriptFields, fetchSourceContext, sorts, highlightBuilder, innerCollapseBuilder); } diff --git a/server/src/main/java/org/elasticsearch/index/query/InnerHitContextBuilder.java b/server/src/main/java/org/elasticsearch/index/query/InnerHitContextBuilder.java index 1fe781f38ce27..9e6a766aed891 100644 --- a/server/src/main/java/org/elasticsearch/index/query/InnerHitContextBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/InnerHitContextBuilder.java @@ -78,6 +78,7 @@ protected void setupInnerHitsContext(QueryShardContext queryShardContext, innerHitsContext.size(innerHitBuilder.getSize()); innerHitsContext.explain(innerHitBuilder.isExplain()); innerHitsContext.version(innerHitBuilder.isVersion()); + innerHitsContext.seqNoAndPrimaryTerm(innerHitBuilder.isSeqNoAndPrimaryTerm()); innerHitsContext.trackScores(innerHitBuilder.isTrackScores()); if (innerHitBuilder.getStoredFieldsContext() != null) { innerHitsContext.storedFieldsContext(innerHitBuilder.getStoredFieldsContext()); diff --git 
a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java index d2b432e7c7ca1..3c3856e208f04 100644 --- a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java @@ -368,6 +368,14 @@ static final class NestedInnerHitSubContext extends InnerHitsContext.InnerHitSub this.childObjectMapper = childObjectMapper; } + @Override + public void seqNoAndPrimaryTerm(boolean seqNoAndPrimaryTerm) { + assert seqNoAndPrimaryTerm() == false; + if (seqNoAndPrimaryTerm) { + throw new UnsupportedOperationException("nested documents are not assigned sequence numbers"); + } + } + @Override public TopDocsAndMaxScore[] topDocs(SearchHit[] hits) throws IOException { Weight innerHitQueryWeight = createInnerHitQueryWeight(); diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index 3e3a1e02a174b..da773efed580d 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -201,6 +201,9 @@ private static void parseSearchSource(final SearchSourceBuilder searchSourceBuil if (request.hasParam("version")) { searchSourceBuilder.version(request.paramAsBoolean("version", null)); } + if (request.hasParam("seq_no_primary_term")) { + searchSourceBuilder.seqNoAndPrimaryTerm(request.paramAsBoolean("seq_no_primary_term", null)); + } if (request.hasParam("timeout")) { searchSourceBuilder.timeout(request.paramAsTime("timeout", null)); } diff --git a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java index 590c58b1f6615..4b82e23e42061 100644 --- a/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/DefaultSearchContext.java @@ -107,6 +107,7 @@ final class DefaultSearchContext extends SearchContext { private ScrollContext scrollContext; private boolean explain; private boolean version = false; // by default, we don't return versions + private boolean seqAndPrimaryTerm = false; private StoredFieldsContext storedFields; private ScriptFieldsContext scriptFields; private FetchSourceContext fetchSourceContext; @@ -719,6 +720,16 @@ public void version(boolean version) { this.version = version; } + @Override + public boolean seqNoAndPrimaryTerm() { + return seqAndPrimaryTerm; + } + + @Override + public void seqNoAndPrimaryTerm(boolean seqNoAndPrimaryTerm) { + this.seqAndPrimaryTerm = seqNoAndPrimaryTerm; + } + @Override public int[] docIdsToLoad() { return docIdsToLoad; diff --git a/server/src/main/java/org/elasticsearch/search/SearchHit.java b/server/src/main/java/org/elasticsearch/search/SearchHit.java index cb34804981f1b..42f96e52fb119 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchHit.java +++ b/server/src/main/java/org/elasticsearch/search/SearchHit.java @@ -21,6 +21,7 @@ import org.apache.lucene.search.Explanation; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; @@ -46,6 +47,7 @@ import org.elasticsearch.index.mapper.IgnoredFieldMapper; import 
org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.SourceFieldMapper; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.fetch.subphase.highlight.HighlightField; import org.elasticsearch.search.lookup.SourceLookup; @@ -91,6 +93,8 @@ public final class SearchHit implements Streamable, ToXContentObject, Iterable, parser.declareField((map, value) -> map.put(Fields._SCORE, value), SearchHit::parseScore, new ParseField(Fields._SCORE), ValueType.FLOAT_OR_NULL); parser.declareLong((map, value) -> map.put(Fields._VERSION, value), new ParseField(Fields._VERSION)); + parser.declareLong((map, value) -> map.put(Fields._SEQ_NO, value), new ParseField(Fields._SEQ_NO)); + parser.declareLong((map, value) -> map.put(Fields._PRIMARY_TERM, value), new ParseField(Fields._PRIMARY_TERM)); parser.declareField((map, value) -> map.put(Fields._SHARD, value), (p, c) -> ShardId.fromString(p.text()), new ParseField(Fields._SHARD), ValueType.STRING); parser.declareObject((map, value) -> map.put(SourceFieldMapper.NAME, value), (p, c) -> parseSourceBytes(p), @@ -588,6 +626,8 @@ public static SearchHit createFromMap(Map values) { } searchHit.score(get(Fields._SCORE, values, DEFAULT_SCORE)); searchHit.version(get(Fields._VERSION, values, -1L)); + searchHit.setSeqNo(get(Fields._SEQ_NO, values, SequenceNumbers.UNASSIGNED_SEQ_NO)); + searchHit.setPrimaryTerm(get(Fields._PRIMARY_TERM, values, SequenceNumbers.UNASSIGNED_PRIMARY_TERM)); searchHit.sortValues(get(Fields.SORT, values, SearchSortValues.EMPTY)); searchHit.highlightFields(get(Fields.HIGHLIGHT, values, null)); searchHit.sourceRef(get(SourceFieldMapper.NAME, values, null)); @@ -744,6 +784,10 @@ public void readFrom(StreamInput in) throws IOException { type = in.readOptionalText(); nestedIdentity = in.readOptionalWriteable(NestedIdentity::new); version = in.readLong(); + if (in.getVersion().onOrAfter(Version.V_7_0_0)) { + seqNo = in.readZLong(); + primaryTerm = in.readVLong(); + } source = in.readBytesReference(); if (source.length() == 0) { source = null; @@ -812,6 +856,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalText(type); out.writeOptionalWriteable(nestedIdentity); out.writeLong(version); + if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + out.writeZLong(seqNo); + out.writeVLong(primaryTerm); + } out.writeBytesReference(source); if (explanation == null) { out.writeBoolean(false); @@ -867,6 +915,8 @@ public boolean equals(Object obj) { && Objects.equals(type, other.type) && Objects.equals(nestedIdentity, other.nestedIdentity) && Objects.equals(version, other.version) + && Objects.equals(seqNo, other.seqNo) + && Objects.equals(primaryTerm, other.primaryTerm) && Objects.equals(source, other.source) && Objects.equals(fields, other.fields) && Objects.equals(getHighlightFields(), other.getHighlightFields()) @@ -880,8 +930,8 @@ public boolean equals(Object obj) { @Override public int hashCode() { - return Objects.hash(id, type, nestedIdentity, version, source, fields, getHighlightFields(), Arrays.hashCode(matchedQueries), - explanation, shard, innerHits, index, clusterAlias); + return Objects.hash(id, type, nestedIdentity, version, seqNo, primaryTerm, source, fields, getHighlightFields(), + Arrays.hashCode(matchedQueries), explanation, shard, innerHits, index, clusterAlias); } /** diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java 
index 2531685b94557..e75271c2885ae 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -240,6 +240,7 @@ import org.elasticsearch.search.fetch.subphase.MatchedQueriesFetchSubPhase; import org.elasticsearch.search.fetch.subphase.ScoreFetchSubPhase; import org.elasticsearch.search.fetch.subphase.ScriptFieldsFetchSubPhase; +import org.elasticsearch.search.fetch.subphase.SeqNoPrimaryTermFetchSubPhase; import org.elasticsearch.search.fetch.subphase.VersionFetchSubPhase; import org.elasticsearch.search.fetch.subphase.highlight.FastVectorHighlighter; import org.elasticsearch.search.fetch.subphase.highlight.HighlightPhase; @@ -727,6 +728,7 @@ private void registerFetchSubPhases(List plugins) { registerFetchSubPhase(new ScriptFieldsFetchSubPhase()); registerFetchSubPhase(new FetchSourceSubPhase()); registerFetchSubPhase(new VersionFetchSubPhase()); + registerFetchSubPhase(new SeqNoPrimaryTermFetchSubPhase()); registerFetchSubPhase(new MatchedQueriesFetchSubPhase()); registerFetchSubPhase(new HighlightPhase(highlighters)); registerFetchSubPhase(new ScoreFetchSubPhase()); diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 4fee684276288..ddec3637ed491 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -901,6 +901,11 @@ private void parseSource(DefaultSearchContext context, SearchSourceBuilder sourc if (source.version() != null) { context.version(source.version()); } + + if (source.seqNoAndPrimaryTerm() != null) { + context.seqNoAndPrimaryTerm(source.seqNoAndPrimaryTerm()); + } + if (source.stats() != null) { context.groupStats(source.stats()); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java index debbacdc6196c..ba51099d6bc00 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.aggregations.metrics; +import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; @@ -66,6 +67,7 @@ public class TopHitsAggregationBuilder extends AbstractAggregationBuilder> sorts = null; private HighlightBuilder highlightBuilder; @@ -85,6 +87,7 @@ protected TopHitsAggregationBuilder(TopHitsAggregationBuilder clone, this.size = clone.size; this.explain = clone.explain; this.version = clone.version; + this.seqNoAndPrimaryTerm = clone.seqNoAndPrimaryTerm; this.trackScores = clone.trackScores; this.sorts = clone.sorts == null ? null : new ArrayList<>(clone.sorts); this.highlightBuilder = clone.highlightBuilder == null ? 
null : @@ -137,6 +140,9 @@ public TopHitsAggregationBuilder(StreamInput in) throws IOException { } trackScores = in.readBoolean(); version = in.readBoolean(); + if (in.getVersion().onOrAfter(Version.V_7_0_0)) { + seqNoAndPrimaryTerm = in.readBoolean(); + } } @Override @@ -173,6 +179,9 @@ protected void doWriteTo(StreamOutput out) throws IOException { } out.writeBoolean(trackScores); out.writeBoolean(version); + if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + out.writeBoolean(seqNoAndPrimaryTerm); + } } /** @@ -526,6 +535,23 @@ public boolean version() { return version; } + /** + * Should each {@link org.elasticsearch.search.SearchHit} be returned with the + * sequence number and primary term of the last modification of the document. + */ + public TopHitsAggregationBuilder seqNoAndPrimaryTerm(Boolean seqNoAndPrimaryTerm) { + this.seqNoAndPrimaryTerm = seqNoAndPrimaryTerm; + return this; + } + + /** + * Indicates whether {@link org.elasticsearch.search.SearchHit}s should be returned with the + * sequence number and primary term of the last modification of the document. + */ + public Boolean seqNoAndPrimaryTerm() { + return seqNoAndPrimaryTerm; + } + /** * Applies when sorting, and controls if scores will be tracked as well. * Defaults to {@code false}. @@ -579,8 +605,9 @@ protected TopHitsAggregatorFactory doBuild(SearchContext context, AggregatorFact } else { optionalSort = SortBuilder.buildSort(sorts, context.getQueryShardContext()); } - return new TopHitsAggregatorFactory(name, from, size, explain, version, trackScores, optionalSort, highlightBuilder, - storedFieldsContext, docValueFields, fields, fetchSourceContext, context, parent, subfactoriesBuilder, metaData); + return new TopHitsAggregatorFactory(name, from, size, explain, version, seqNoAndPrimaryTerm, trackScores, optionalSort, + highlightBuilder, storedFieldsContext, docValueFields, fields, fetchSourceContext, context, parent, subfactoriesBuilder, + metaData); } @Override @@ -589,6 +616,7 @@ protected XContentBuilder internalXContent(XContentBuilder builder, Params param builder.field(SearchSourceBuilder.FROM_FIELD.getPreferredName(), from); builder.field(SearchSourceBuilder.SIZE_FIELD.getPreferredName(), size); builder.field(SearchSourceBuilder.VERSION_FIELD.getPreferredName(), version); + builder.field(SearchSourceBuilder.SEQ_NO_PRIMARY_TERM_FIELD.getPreferredName(), seqNoAndPrimaryTerm); builder.field(SearchSourceBuilder.EXPLAIN_FIELD.getPreferredName(), explain); if (fetchSourceContext != null) { builder.field(SearchSourceBuilder._SOURCE_FIELD.getPreferredName(), fetchSourceContext); @@ -646,6 +674,8 @@ public static TopHitsAggregationBuilder parse(String aggregationName, XContentPa factory.size(parser.intValue()); } else if (SearchSourceBuilder.VERSION_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { factory.version(parser.booleanValue()); + } else if (SearchSourceBuilder.SEQ_NO_PRIMARY_TERM_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + factory.seqNoAndPrimaryTerm(parser.booleanValue()); } else if (SearchSourceBuilder.EXPLAIN_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { factory.explain(parser.booleanValue()); } else if (SearchSourceBuilder.TRACK_SCORES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { @@ -745,7 +775,7 @@ public static TopHitsAggregationBuilder parse(String aggregationName, XContentPa @Override protected int doHashCode() { return Objects.hash(explain, fetchSourceContext, docValueFields, storedFieldsContext, from, highlightBuilder, - 
scriptFields, size, sorts, trackScores, version); + scriptFields, size, sorts, trackScores, version, seqNoAndPrimaryTerm); } @Override @@ -761,7 +791,8 @@ protected boolean doEquals(Object obj) { && Objects.equals(size, other.size) && Objects.equals(sorts, other.sorts) && Objects.equals(trackScores, other.trackScores) - && Objects.equals(version, other.version); + && Objects.equals(version, other.version) + && Objects.equals(seqNoAndPrimaryTerm, other.seqNoAndPrimaryTerm); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregatorFactory.java index 6086942955122..7edaccb66d4bd 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregatorFactory.java @@ -44,6 +44,7 @@ class TopHitsAggregatorFactory extends AggregatorFactory sort; private final HighlightBuilder highlightBuilder; @@ -52,8 +53,8 @@ class TopHitsAggregatorFactory extends AggregatorFactory scriptFields; private final FetchSourceContext fetchSourceContext; - TopHitsAggregatorFactory(String name, int from, int size, boolean explain, boolean version, boolean trackScores, - Optional sort, HighlightBuilder highlightBuilder, StoredFieldsContext storedFieldsContext, + TopHitsAggregatorFactory(String name, int from, int size, boolean explain, boolean version, boolean seqNoAndPrimaryTerm, + boolean trackScores, Optional sort, HighlightBuilder highlightBuilder, StoredFieldsContext storedFieldsContext, List docValueFields, List scriptFields, FetchSourceContext fetchSourceContext, SearchContext context, AggregatorFactory parent, AggregatorFactories.Builder subFactories, Map metaData) throws IOException { @@ -62,6 +63,7 @@ class TopHitsAggregatorFactory extends AggregatorFactory> sorts; private boolean trackScores = false; @@ -247,6 +250,11 @@ public SearchSourceBuilder(StreamInput in) throws IOException { timeout = in.readOptionalTimeValue(); trackScores = in.readBoolean(); version = in.readOptionalBoolean(); + if (in.getVersion().onOrAfter(Version.V_7_0_0)) { + seqNoAndPrimaryTerm = in.readOptionalBoolean(); + } else { + seqNoAndPrimaryTerm = null; + } extBuilders = in.readNamedWriteableList(SearchExtBuilder.class); profile = in.readBoolean(); searchAfterBuilder = in.readOptionalWriteable(SearchAfterBuilder::new); @@ -310,6 +318,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalTimeValue(timeout); out.writeBoolean(trackScores); out.writeOptionalBoolean(version); + if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + out.writeOptionalBoolean(seqNoAndPrimaryTerm); + } out.writeNamedWriteableList(extBuilders); out.writeBoolean(profile); out.writeOptionalWriteable(searchAfterBuilder); @@ -441,6 +452,23 @@ public Boolean version() { return version; } + /** + * Should each {@link org.elasticsearch.search.SearchHit} be returned with the + * sequence number and primary term of the last modification of the document. + */ + public SearchSourceBuilder seqNoAndPrimaryTerm(Boolean seqNoAndPrimaryTerm) { + this.seqNoAndPrimaryTerm = seqNoAndPrimaryTerm; + return this; + } + + /** + * Indicates whether {@link org.elasticsearch.search.SearchHit}s should be returned with the + * sequence number and primary term of the last modification of the document. 
+ */ + public Boolean seqNoAndPrimaryTerm() { + return seqNoAndPrimaryTerm; + } + /** * An optional timeout to control how long search is allowed to take. */ @@ -999,6 +1027,7 @@ private SearchSourceBuilder shallowCopy(QueryBuilder queryBuilder, QueryBuilder rewrittenBuilder.trackScores = trackScores; rewrittenBuilder.trackTotalHitsUpTo = trackTotalHitsUpTo; rewrittenBuilder.version = version; + rewrittenBuilder.seqNoAndPrimaryTerm = seqNoAndPrimaryTerm; rewrittenBuilder.collapse = collapse; return rewrittenBuilder; } @@ -1038,6 +1067,8 @@ public void parseXContent(XContentParser parser, boolean checkTrailingTokens) th minScore = parser.floatValue(); } else if (VERSION_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { version = parser.booleanValue(); + } else if (SEQ_NO_PRIMARY_TERM_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + seqNoAndPrimaryTerm = parser.booleanValue(); } else if (EXPLAIN_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { explain = parser.booleanValue(); } else if (TRACK_SCORES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { @@ -1205,6 +1236,10 @@ public XContentBuilder innerToXContent(XContentBuilder builder, Params params) t builder.field(VERSION_FIELD.getPreferredName(), version); } + if (seqNoAndPrimaryTerm != null) { + builder.field(SEQ_NO_PRIMARY_TERM_FIELD.getPreferredName(), seqNoAndPrimaryTerm); + } + if (explain != null) { builder.field(EXPLAIN_FIELD.getPreferredName(), explain); } @@ -1523,7 +1558,7 @@ public int hashCode() { return Objects.hash(aggregations, explain, fetchSourceContext, docValueFields, storedFieldsContext, from, highlightBuilder, indexBoosts, minScore, postQueryBuilder, queryBuilder, rescoreBuilders, scriptFields, size, sorts, searchAfterBuilder, sliceBuilder, stats, suggestBuilder, terminateAfter, timeout, trackScores, version, - profile, extBuilders, collapse, trackTotalHitsUpTo); + seqNoAndPrimaryTerm, profile, extBuilders, collapse, trackTotalHitsUpTo); } @Override @@ -1558,6 +1593,7 @@ public boolean equals(Object obj) { && Objects.equals(timeout, other.timeout) && Objects.equals(trackScores, other.trackScores) && Objects.equals(version, other.version) + && Objects.equals(seqNoAndPrimaryTerm, other.seqNoAndPrimaryTerm) && Objects.equals(profile, other.profile) && Objects.equals(extBuilders, other.extBuilders) && Objects.equals(collapse, other.collapse) diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/SeqNoPrimaryTermFetchSubPhase.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/SeqNoPrimaryTermFetchSubPhase.java new file mode 100644 index 0000000000000..31a6328ff9574 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/SeqNoPrimaryTermFetchSubPhase.java @@ -0,0 +1,69 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.search.fetch.subphase; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.NumericDocValues; +import org.apache.lucene.index.ReaderUtil; +import org.elasticsearch.index.mapper.SeqNoFieldMapper; +import org.elasticsearch.index.seqno.SequenceNumbers; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.fetch.FetchSubPhase; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Comparator; + +public final class SeqNoPrimaryTermFetchSubPhase implements FetchSubPhase { + @Override + public void hitsExecute(SearchContext context, SearchHit[] hits) throws IOException { + if (context.seqNoAndPrimaryTerm() == false) { + return; + } + + hits = hits.clone(); // don't modify the incoming hits + Arrays.sort(hits, Comparator.comparingInt(SearchHit::docId)); + + int lastReaderId = -1; + NumericDocValues seqNoField = null; + NumericDocValues primaryTermField = null; + for (SearchHit hit : hits) { + int readerId = ReaderUtil.subIndex(hit.docId(), context.searcher().getIndexReader().leaves()); + LeafReaderContext subReaderContext = context.searcher().getIndexReader().leaves().get(readerId); + if (lastReaderId != readerId) { + seqNoField = subReaderContext.reader().getNumericDocValues(SeqNoFieldMapper.NAME); + primaryTermField = subReaderContext.reader().getNumericDocValues(SeqNoFieldMapper.PRIMARY_TERM_NAME); + lastReaderId = readerId; + } + int docId = hit.docId() - subReaderContext.docBase; + long seqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; + long primaryTerm = SequenceNumbers.UNASSIGNED_PRIMARY_TERM; + // we have to check the primary term field as it is only assigned for non-nested documents + if (primaryTermField != null && primaryTermField.advanceExact(docId)) { + boolean found = seqNoField.advanceExact(docId); + assert found: "found a primary term for " + docId + " but not a seq no"; + seqNo = seqNoField.longValue(); + primaryTerm = primaryTermField.longValue(); + } + hit.setSeqNo(seqNo); + hit.setPrimaryTerm(primaryTerm); + } + } +} diff --git a/server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java b/server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java index 3a7fb9f823f3a..ecde28719cec6 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java @@ -422,6 +422,16 @@ public void version(boolean version) { in.version(version); } + @Override + public boolean seqNoAndPrimaryTerm() { + return in.seqNoAndPrimaryTerm(); + } + + @Override + public void seqNoAndPrimaryTerm(boolean seqNoAndPrimaryTerm) { + in.seqNoAndPrimaryTerm(seqNoAndPrimaryTerm); + } + @Override public int[] docIdsToLoad() { return in.docIdsToLoad(); diff --git a/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java index 768143dd8fb0b..bd6d9c501c8d1 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/internal/SearchContext.java @@ -38,7 +38,6 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ObjectMapper; -import
org.elasticsearch.search.collapse.CollapseContext; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.shard.IndexShard; @@ -46,6 +45,7 @@ import org.elasticsearch.search.SearchExtBuilder; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.aggregations.SearchContextAggregations; +import org.elasticsearch.search.collapse.CollapseContext; import org.elasticsearch.search.dfs.DfsSearchResult; import org.elasticsearch.search.fetch.FetchPhase; import org.elasticsearch.search.fetch.FetchSearchResult; @@ -309,6 +309,12 @@ public InnerHitsContext innerHits() { public abstract void version(boolean version); + /** indicates whether the sequence number and primary term of the last modification to each hit should be returned */ + public abstract boolean seqNoAndPrimaryTerm(); + + /** controls whether the sequence number and primary term of the last modification to each hit should be returned */ + public abstract void seqNoAndPrimaryTerm(boolean seqNoAndPrimaryTerm); + public abstract int[] docIdsToLoad(); public abstract int docIdsToLoadFrom(); diff --git a/server/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java b/server/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java index 8c8137f5e4345..fb4d233f10ee8 100644 --- a/server/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java +++ b/server/src/main/java/org/elasticsearch/search/internal/SubSearchContext.java @@ -65,6 +65,7 @@ public class SubSearchContext extends FilteredSearchContext { private boolean explain; private boolean trackScores; private boolean version; + private boolean seqNoAndPrimaryTerm; public SubSearchContext(SearchContext context) { super(context); @@ -294,6 +295,16 @@ public void version(boolean version) { this.version = version; } + @Override + public boolean seqNoAndPrimaryTerm() { + return seqNoAndPrimaryTerm; + } + + @Override + public void seqNoAndPrimaryTerm(boolean seqNoAndPrimaryTerm) { + this.seqNoAndPrimaryTerm = seqNoAndPrimaryTerm; + } + @Override public int[] docIdsToLoad() { return docIdsToLoad; diff --git a/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java b/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java index 9378a2cdd86bb..328950e4f3569 100644 --- a/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/ExpandSearchPhaseTests.java @@ -241,6 +241,7 @@ public void run() throws IOException { public void testExpandRequestOptions() throws IOException { MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(1); boolean version = randomBoolean(); + final boolean seqNoAndTerm = randomBoolean(); mockSearchPhaseContext.searchTransport = new SearchTransportService(null, null) { @Override @@ -249,13 +250,14 @@ void sendExecuteMultiSearch(MultiSearchRequest request, SearchTask task, ActionL assertTrue(request.requests().stream().allMatch((r) -> "foo".equals(r.preference()))); assertTrue(request.requests().stream().allMatch((r) -> "baz".equals(r.routing()))); assertTrue(request.requests().stream().allMatch((r) -> version == r.source().version())); + assertTrue(request.requests().stream().allMatch((r) -> seqNoAndTerm == r.source().seqNoAndPrimaryTerm())); assertTrue(request.requests().stream().allMatch((r) -> postFilter.equals(r.source().postFilter()))); } }; 
mockSearchPhaseContext.getRequest().source(new SearchSourceBuilder() .collapse( new CollapseBuilder("someField") - .setInnerHits(new InnerHitBuilder().setName("foobarbaz").setVersion(version)) + .setInnerHits(new InnerHitBuilder().setName("foobarbaz").setVersion(version).setSeqNoAndPrimaryTerm(seqNoAndTerm)) ) .postFilter(QueryBuilders.existsQuery("foo"))) .preference("foobar") diff --git a/server/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java index 5c76c77b5c888..257ee807419b6 100644 --- a/server/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/InnerHitBuilderTests.java @@ -140,6 +140,11 @@ public void testEqualsAndHashcode() { } } + public static InnerHitBuilder randomNestedInnerHits() { + InnerHitBuilder innerHitBuilder = randomInnerHits(); + innerHitBuilder.setSeqNoAndPrimaryTerm(false); // not supported by nested queries + return innerHitBuilder; + } public static InnerHitBuilder randomInnerHits() { InnerHitBuilder innerHits = new InnerHitBuilder(); innerHits.setName(randomAlphaOfLengthBetween(1, 16)); @@ -147,6 +152,7 @@ public static InnerHitBuilder randomInnerHits() { innerHits.setSize(randomIntBetween(0, 32)); innerHits.setExplain(randomBoolean()); innerHits.setVersion(randomBoolean()); + innerHits.setSeqNoAndPrimaryTerm(randomBoolean()); innerHits.setTrackScores(randomBoolean()); if (randomBoolean()) { innerHits.setStoredFieldNames(randomListStuff(16, () -> randomAlphaOfLengthBetween(1, 16))); @@ -189,6 +195,7 @@ static InnerHitBuilder mutate(InnerHitBuilder original) throws IOException { modifiers.add(() -> copy.setSize(randomValueOtherThan(copy.getSize(), () -> randomIntBetween(0, 128)))); modifiers.add(() -> copy.setExplain(!copy.isExplain())); modifiers.add(() -> copy.setVersion(!copy.isVersion())); + modifiers.add(() -> copy.setSeqNoAndPrimaryTerm(!copy.isSeqNoAndPrimaryTerm())); modifiers.add(() -> copy.setTrackScores(!copy.isTrackScores())); modifiers.add(() -> copy.setName(randomValueOtherThan(copy.getName(), () -> randomAlphaOfLengthBetween(1, 16)))); modifiers.add(() -> { diff --git a/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java index 76479791283b4..ac9ae8d0fa7fb 100644 --- a/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/NestedQueryBuilderTests.java @@ -45,7 +45,7 @@ import java.util.Map; import static org.elasticsearch.index.IndexSettingsTests.newIndexMeta; -import static org.elasticsearch.index.query.InnerHitBuilderTests.randomInnerHits; +import static org.elasticsearch.index.query.InnerHitBuilderTests.randomNestedInnerHits; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; @@ -267,7 +267,7 @@ public void testThatUnrecognizedFromStringThrowsException() { } public void testInlineLeafInnerHitsNestedQuery() throws Exception { - InnerHitBuilder leafInnerHits = randomInnerHits(); + InnerHitBuilder leafInnerHits = randomNestedInnerHits(); NestedQueryBuilder nestedQueryBuilder = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None); nestedQueryBuilder.innerHit(leafInnerHits); Map innerHitBuilders = new HashMap<>(); @@ -276,7 +276,7 @@ public void 
testInlineLeafInnerHitsNestedQuery() throws Exception { } public void testInlineLeafInnerHitsNestedQueryViaBoolQuery() { - InnerHitBuilder leafInnerHits = randomInnerHits(); + InnerHitBuilder leafInnerHits = randomNestedInnerHits(); NestedQueryBuilder nestedQueryBuilder = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None) .innerHit(leafInnerHits); BoolQueryBuilder boolQueryBuilder = new BoolQueryBuilder().should(nestedQueryBuilder); @@ -286,7 +286,7 @@ public void testInlineLeafInnerHitsNestedQueryViaBoolQuery() { } public void testInlineLeafInnerHitsNestedQueryViaConstantScoreQuery() { - InnerHitBuilder leafInnerHits = randomInnerHits(); + InnerHitBuilder leafInnerHits = randomNestedInnerHits(); NestedQueryBuilder nestedQueryBuilder = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None) .innerHit(leafInnerHits); ConstantScoreQueryBuilder constantScoreQueryBuilder = new ConstantScoreQueryBuilder(nestedQueryBuilder); @@ -296,10 +296,10 @@ public void testInlineLeafInnerHitsNestedQueryViaConstantScoreQuery() { } public void testInlineLeafInnerHitsNestedQueryViaBoostingQuery() { - InnerHitBuilder leafInnerHits1 = randomInnerHits(); + InnerHitBuilder leafInnerHits1 = randomNestedInnerHits(); NestedQueryBuilder nestedQueryBuilder1 = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None) .innerHit(leafInnerHits1); - InnerHitBuilder leafInnerHits2 = randomInnerHits(); + InnerHitBuilder leafInnerHits2 = randomNestedInnerHits(); NestedQueryBuilder nestedQueryBuilder2 = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None) .innerHit(leafInnerHits2); BoostingQueryBuilder constantScoreQueryBuilder = new BoostingQueryBuilder(nestedQueryBuilder1, nestedQueryBuilder2); @@ -310,7 +310,7 @@ public void testInlineLeafInnerHitsNestedQueryViaBoostingQuery() { } public void testInlineLeafInnerHitsNestedQueryViaFunctionScoreQuery() { - InnerHitBuilder leafInnerHits = randomInnerHits(); + InnerHitBuilder leafInnerHits = randomNestedInnerHits(); NestedQueryBuilder nestedQueryBuilder = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None) .innerHit(leafInnerHits); FunctionScoreQueryBuilder functionScoreQueryBuilder = new FunctionScoreQueryBuilder(nestedQueryBuilder); @@ -330,7 +330,7 @@ public void testBuildIgnoreUnmappedNestQuery() throws Exception { when(mapperService.getIndexSettings()).thenReturn(settings); when(searchContext.mapperService()).thenReturn(mapperService); - InnerHitBuilder leafInnerHits = randomInnerHits(); + InnerHitBuilder leafInnerHits = randomNestedInnerHits(); NestedQueryBuilder query1 = new NestedQueryBuilder("path", new MatchAllQueryBuilder(), ScoreMode.None); query1.innerHit(leafInnerHits); final Map innerHitBuilders = new HashMap<>(); diff --git a/server/src/test/java/org/elasticsearch/search/SearchHitTests.java b/server/src/test/java/org/elasticsearch/search/SearchHitTests.java index fee55f1e22f23..4831729201183 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchHitTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchHitTests.java @@ -90,6 +90,11 @@ public static SearchHit createTestItem(XContentType xContentType, boolean withOp if (randomBoolean()) { hit.version(randomLong()); } + + if (randomBoolean()) { + hit.setSeqNo(randomNonNegativeLong()); + hit.setPrimaryTerm(randomLongBetween(1, Long.MAX_VALUE)); + } if (randomBoolean()) { hit.sortValues(SearchSortValuesTests.createTestItem(xContentType, transportSerialization)); } diff --git
a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java index ce8d9c2da834f..4a5982b9dacfa 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsIT.java @@ -31,6 +31,7 @@ import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.MockScriptEngine; import org.elasticsearch.script.MockScriptPlugin; @@ -83,6 +84,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.lessThanOrEqualTo; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; @@ -578,6 +580,7 @@ public void testFieldCollapsing() throws Exception { } public void testFetchFeatures() { + final boolean seqNoAndTerm = randomBoolean(); SearchResponse response = client().prepareSearch("idx") .setQuery(matchQuery("text", "text").queryName("test")) .addAggregation(terms("terms") @@ -593,6 +596,7 @@ public void testFetchFeatures() { new Script(ScriptType.INLINE, MockScriptEngine.NAME, "5", Collections.emptyMap())) .fetchSource("text", null) .version(true) + .seqNoAndPrimaryTerm(seqNoAndTerm) ) ) .get(); @@ -620,6 +624,14 @@ public void testFetchFeatures() { long version = hit.getVersion(); assertThat(version, equalTo(1L)); + if (seqNoAndTerm) { + assertThat(hit.getSeqNo(), greaterThanOrEqualTo(0L)); + assertThat(hit.getPrimaryTerm(), greaterThanOrEqualTo(1L)); + } else { + assertThat(hit.getSeqNo(), equalTo(SequenceNumbers.UNASSIGNED_SEQ_NO)); + assertThat(hit.getPrimaryTerm(), equalTo(SequenceNumbers.UNASSIGNED_PRIMARY_TERM)); + } + assertThat(hit.getMatchedQueries()[0], equalTo("test")); DocumentField field = hit.field("field1"); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java index caf1d4ef4ee47..6ec1ea1cad301 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TopHitsTests.java @@ -54,6 +54,9 @@ protected final TopHitsAggregationBuilder createTestAggregatorBuilder() { if (randomBoolean()) { factory.version(randomBoolean()); } + if (randomBoolean()) { + factory.seqNoAndPrimaryTerm(randomBoolean()); + } if (randomBoolean()) { factory.trackScores(randomBoolean()); } diff --git a/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java b/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java index 5d96cd37b054d..6ec2732aaf915 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java +++ b/test/framework/src/main/java/org/elasticsearch/search/RandomSearchRequestGenerator.java @@ -145,6 +145,9 @@ public static SearchSourceBuilder randomSearchSourceBuilder( if (randomBoolean()) { builder.version(randomBoolean()); } + if (randomBoolean()) { + builder.seqNoAndPrimaryTerm(randomBoolean()); + } if (randomBoolean()) { 
builder.trackScores(randomBoolean()); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java index 18edb5ec3790a..6a17f65790368 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java +++ b/test/framework/src/main/java/org/elasticsearch/test/TestSearchContext.java @@ -504,6 +504,16 @@ public boolean version() { public void version(boolean version) { } + @Override + public boolean seqNoAndPrimaryTerm() { + return false; + } + + @Override + public void seqNoAndPrimaryTerm(boolean seqNoAndPrimaryTerm) { + + } + @Override public int[] docIdsToLoad() { return new int[0]; From 95a6951f78524cf3ff64df5677f8a840b21fb141 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Wed, 23 Jan 2019 09:46:28 +0100 Subject: [PATCH 26/39] Use new bulk API endpoint in the docs (#37698) This change switches to using the typeless bulk API endpoint in the documentation snippets where possible --- .../bucket/adjacency-matrix-aggregation.asciidoc | 2 +- .../aggregations/bucket/filters-aggregation.asciidoc | 2 +- .../metrics/scripted-metric-aggregation.asciidoc | 2 +- docs/reference/getting-started.asciidoc | 6 +++--- docs/reference/search/validate.asciidoc | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/reference/aggregations/bucket/adjacency-matrix-aggregation.asciidoc b/docs/reference/aggregations/bucket/adjacency-matrix-aggregation.asciidoc index 1806f05d2c686..5cd67877460bf 100644 --- a/docs/reference/aggregations/bucket/adjacency-matrix-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/adjacency-matrix-aggregation.asciidoc @@ -30,7 +30,7 @@ Example: [source,js] -------------------------------------------------- -PUT /emails/_doc/_bulk?refresh +PUT /emails/_bulk?refresh { "index" : { "_id" : 1 } } { "accounts" : ["hillary", "sidney"]} { "index" : { "_id" : 2 } } diff --git a/docs/reference/aggregations/bucket/filters-aggregation.asciidoc b/docs/reference/aggregations/bucket/filters-aggregation.asciidoc index b7e3b1edf10d2..94b91654f0c7f 100644 --- a/docs/reference/aggregations/bucket/filters-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/filters-aggregation.asciidoc @@ -9,7 +9,7 @@ Example: [source,js] -------------------------------------------------- -PUT /logs/_doc/_bulk?refresh +PUT /logs/_bulk?refresh { "index" : { "_id" : 1 } } { "body" : "warning: page could not be rendered" } { "index" : { "_id" : 2 } } diff --git a/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc b/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc index 69c9d50690196..1f2ec113d31fd 100644 --- a/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/scripted-metric-aggregation.asciidoc @@ -147,7 +147,7 @@ Imagine a situation where you index the following documents into an index with 2 [source,js] -------------------------------------------------- -PUT /transactions/_doc/_bulk?refresh +PUT /transactions/_bulk?refresh {"index":{"_id":1}} {"type": "sale","amount": 80} {"index":{"_id":2}} diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index b79dd5c36c244..d32eeaff8c719 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -633,7 +633,7 @@ As a quick example, the following call indexes two documents (ID 1 - John Doe an [source,js] 
-------------------------------------------------- -POST /customer/_doc/_bulk?pretty +POST /customer/_bulk?pretty {"index":{"_id":"1"}} {"name": "John Doe" } {"index":{"_id":"2"}} @@ -645,7 +645,7 @@ This example updates the first document (ID of 1) and then deletes the second do [source,sh] -------------------------------------------------- -POST /customer/_doc/_bulk?pretty +POST /customer/_bulk?pretty {"update":{"_id":"1"}} {"doc": { "name": "John Doe becomes Jane Doe" } } {"delete":{"_id":"2"}} @@ -692,7 +692,7 @@ You can download the sample dataset (accounts.json) from https://github.com/elas [source,sh] -------------------------------------------------- -curl -H "Content-Type: application/json" -XPOST "localhost:9200/bank/_doc/_bulk?pretty&refresh" --data-binary "@accounts.json" +curl -H "Content-Type: application/json" -XPOST "localhost:9200/bank/_bulk?pretty&refresh" --data-binary "@accounts.json" curl "localhost:9200/_cat/indices?v" -------------------------------------------------- // NOTCONSOLE diff --git a/docs/reference/search/validate.asciidoc b/docs/reference/search/validate.asciidoc index c17f1393e1520..47eab847336ce 100644 --- a/docs/reference/search/validate.asciidoc +++ b/docs/reference/search/validate.asciidoc @@ -6,7 +6,7 @@ without executing it. We'll use the following test data to explain _validate: [source,js] -------------------------------------------------- -PUT twitter/_doc/_bulk?refresh +PUT twitter/_bulk?refresh {"index":{"_id":1}} {"user" : "kimchy", "post_date" : "2009-11-15T14:12:12", "message" : "trying out Elasticsearch"} {"index":{"_id":2}} From 6926a73d618be8bb8329e4f98eadcdf1fb4bb17d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Wed, 23 Jan 2019 09:46:49 +0100 Subject: [PATCH 27/39] Fix edge case in PutMappingRequestTests (#37665) The recently introduced client side PutMappingRequestTests has an edge case that leads to failing tests. When the "source" of the original request is `null` it gets rendered to xContent as an empty object, which the test class itself then parses back as an empty map. For this reason the original and the parsed request differ (one has a null source, the other an empty map). This change fixes the edge case by assuming an empty map means a null source in the request. In practice the distinction doesn't matter because all the client side request does is write itself to xContent, which gives the same result regardless of whether `source` is null or empty.
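
To make the asymmetry concrete, here is a minimal sketch of that round trip using only JDK types; the class and variable names are illustrative stand-ins and not part of the patch:

import java.util.Collections;
import java.util.Map;

// Hypothetical illustration of the edge case fixed below: a null mapping
// source still renders to xContent as the empty object "{}", but parsing
// "{}" back yields an empty map rather than null.
public class NullSourceRoundTrip {
    public static void main(String[] args) {
        Map<String, Object> source = null;                    // request created without a source
        String rendered = (source == null) ? "{}" : "{...}";  // rendering a null source gives "{}"

        Map<String, Object> parsed = Collections.emptyMap();  // parsing "{}" gives an empty map

        // The fix: treat an empty parsed map as "no source" so the
        // round-tripped request compares equal to the original.
        Map<String, Object> roundTripped = parsed.isEmpty() ? null : parsed;
        System.out.println(rendered + " parses back to " + roundTripped); // {} parses back to null
    }
}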
Closes #37654 --- .../indices/PutMappingRequestTests.java | 23 ++++++++++++------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/PutMappingRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/PutMappingRequestTests.java index aff64533ece0f..50224aa1b9ad7 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/PutMappingRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/indices/PutMappingRequestTests.java @@ -19,15 +19,14 @@ package org.elasticsearch.client.indices; -import org.apache.lucene.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.RandomCreateIndexGenerator; import org.elasticsearch.test.AbstractXContentTestCase; import java.io.IOException; +import java.util.Map; -@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37654") public class PutMappingRequestTests extends AbstractXContentTestCase { @Override @@ -47,7 +46,10 @@ protected PutMappingRequest createTestInstance() { @Override protected PutMappingRequest doParseInstance(XContentParser parser) throws IOException { PutMappingRequest request = new PutMappingRequest(); - request.source(parser.map()); + Map map = parser.map(); + if (map.isEmpty() == false) { + request.source(map); + } return request; } @@ -58,11 +60,16 @@ protected boolean supportsUnknownFields() { @Override protected void assertEqualInstances(PutMappingRequest expected, PutMappingRequest actual) { - try (XContentParser expectedJson = createParser(expected.xContentType().xContent(), expected.source()); - XContentParser actualJson = createParser(actual.xContentType().xContent(), actual.source())) { - assertEquals(expectedJson.mapOrdered(), actualJson.mapOrdered()); - } catch (IOException e) { - throw new RuntimeException(e); + if (actual.source() != null) { + try (XContentParser expectedJson = createParser(expected.xContentType().xContent(), expected.source()); + XContentParser actualJson = createParser(actual.xContentType().xContent(), actual.source())) { + assertEquals(expectedJson.mapOrdered(), actualJson.mapOrdered()); + } catch (IOException e) { + throw new RuntimeException(e); + } + } else { + // if the original `source` is null, the parsed source should be so too + assertNull(expected.source()); } } } From b3f9becf5f99e718b751af81bb31ae1c0779fdcc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Wed, 23 Jan 2019 09:48:00 +0100 Subject: [PATCH 28/39] Modify removal_of_types.asciidoc (#37648) After switching the default behaviour of "include_type_name" to "false" in 7.0, some parts of the types removal documentation can be adapted as well. 
--- .../mapping/removal_of_types.asciidoc | 30 +++++++++++-------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/docs/reference/mapping/removal_of_types.asciidoc b/docs/reference/mapping/removal_of_types.asciidoc index 251025c1ba24c..ee5ee4b4fe664 100644 --- a/docs/reference/mapping/removal_of_types.asciidoc +++ b/docs/reference/mapping/removal_of_types.asciidoc @@ -112,7 +112,7 @@ have looked something like this: [source,js] ---- -PUT twitter?include_type_name=true +PUT twitter { "mappings": { "user": { @@ -157,16 +157,16 @@ GET twitter/tweet/_search ---- // NOTCONSOLE -You could achieve the same thing by adding a custom `type` field as follows: +You can achieve the same thing by adding a custom `type` field as follows: [source,js] ---- -PUT twitter?include_type_name=true +PUT twitter?include_type_name=true <1> { "mappings": { "_doc": { "properties": { - "type": { "type": "keyword" }, <1> + "type": { "type": "keyword" }, <2> "name": { "type": "text" }, "user_name": { "type": "keyword" }, "email": { "type": "keyword" }, @@ -204,7 +204,7 @@ GET twitter/_search }, "filter": { "match": { - "type": "tweet" <1> + "type": "tweet" <2> } } } @@ -212,7 +212,9 @@ GET twitter/_search } ---- // NOTCONSOLE -<1> The explicit `type` field takes the place of the implicit `_type` field. +<1> Use `include_type_name=true` in case you need to use the "old" syntax including the "_doc" object, as +in this example. +<2> The explicit `type` field takes the place of the implicit `_type` field. [float] ==== Parent/Child without mapping types @@ -299,7 +301,7 @@ This first example splits our `twitter` index into a `tweets` index and a [source,js] ---- -PUT users?include_type_name=true +PUT users { "settings": { "index.mapping.single_type": true @@ -321,7 +323,7 @@ PUT users?include_type_name=true } } -PUT tweets?include_type_name=true +PUT tweets { "settings": { "index.mapping.single_type": true @@ -376,7 +378,7 @@ documents of different types which have conflicting IDs: [source,js] ---- -PUT new_twitter?include_type_name=true +PUT new_twitter { "mappings": { "_doc": { @@ -427,10 +429,12 @@ POST _reindex [float] === Use `include_type_name=false` to prepare for upgrade to 8.0 -Index creation, mappings and document APIs support the `include_type_name` -option. When set to `false`, this option enables the behavior that will become -default in 8.0 when types are removed. See some examples of interactions with -Elasticsearch with this option turned off: +Index creation and mapping APIs support a new `include_type_name` URL parameter +starting with version 6.7. It will default to `true` in version 6.7, default to +`false` in version 7.0 and will be removed in version 8.0. When set to `true`, +this parameter enables the pre-7.0 behavior of using type names in the API. + +See some examples of interactions with Elasticsearch with this option turned off: [float] ==== Index creation From 7b3dd3022da261a6f299567091034548a80a5ebd Mon Sep 17 00:00:00 2001 From: David Roberts Date: Wed, 23 Jan 2019 09:37:37 +0000 Subject: [PATCH 29/39] [ML] Update ML results mappings on process start (#37706) This change moves the update to the results index mappings from the open job action to the code that starts the autodetect process. When a rolling upgrade is performed we need to update the mappings for already-open jobs that are reassigned from an old version node to a new version node, but the open job action is not called in this case.
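
The gist of the check this patch relocates: each results index records the version that last wrote its mappings under `_meta.version`, and mappings are re-put only for indices whose recorded version is missing or older than the node's minimum. A simplified, self-contained sketch of that comparison follows; it handles plain dotted versions only, and the class, method, and index names are illustrative (the real logic lives in ElasticsearchMappings.mappingRequiresUpdate in the diff below):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class MappingVersionCheck {

    // Compares dotted versions such as "6.5.0" part by part, numerically.
    static int compareVersions(String a, String b) {
        String[] as = a.split("\\.");
        String[] bs = b.split("\\.");
        for (int i = 0; i < Math.max(as.length, bs.length); i++) {
            int ai = i < as.length ? Integer.parseInt(as[i]) : 0;
            int bi = i < bs.length ? Integer.parseInt(bs[i]) : 0;
            if (ai != bi) {
                return Integer.compare(ai, bi);
            }
        }
        return 0;
    }

    // Returns the indices whose mapping _meta carries no version, or a version
    // older than minVersion; those are the mappings that must be re-put.
    static List<String> indicesNeedingUpdate(Map<String, String> metaVersionByIndex, String minVersion) {
        List<String> needUpdate = new ArrayList<>();
        for (Map.Entry<String, String> e : metaVersionByIndex.entrySet()) {
            String v = e.getValue();
            if (v == null || compareVersions(v, minVersion) < 0) {
                needUpdate.add(e.getKey());
            }
        }
        return needUpdate;
    }

    public static void main(String[] args) {
        Map<String, String> versions = new HashMap<>();
        versions.put(".ml-anomalies-old", "6.5.0"); // written by an old-version node
        versions.put(".ml-anomalies-new", "7.0.0"); // already current
        // On a 7.0.0 node only the 6.5.0 mapping needs an update.
        System.out.println(indicesNeedingUpdate(versions, "7.0.0"));
    }
}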
Closes #37607 --- .../persistence/ElasticsearchMappings.java | 114 +++++++++++++++++ .../ElasticsearchMappingsTests.java | 100 +++++++++++++++ .../ml/action/TransportOpenJobAction.java | 121 +----------------- .../autodetect/AutodetectProcessManager.java | 18 ++- .../action/TransportOpenJobActionTests.java | 93 -------------- .../AutodetectProcessManagerTests.java | 1 + .../upgrades/MlMappingsUpgradeIT.java | 101 +++++++++++++++ 7 files changed, 336 insertions(+), 212 deletions(-) create mode 100644 x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlMappingsUpgradeIT.java diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java index fb0db771fa581..0eb2e666916dc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappings.java @@ -5,8 +5,24 @@ */ package org.elasticsearch.xpack.core.ml.job.persistence; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.AliasOrIndex; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.common.CheckedSupplier; +import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.Index; +import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.xpack.core.ml.datafeed.ChunkingConfig; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.DelayedDataCheckConfig; @@ -38,10 +54,16 @@ import org.elasticsearch.xpack.core.ml.notifications.AuditMessage; import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.List; +import java.util.Map; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; +import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; /** * Static methods to create Elasticsearch index mappings for the autodetect @@ -107,6 +129,8 @@ public class ElasticsearchMappings { static final String RAW = "raw"; + private static final Logger logger = LogManager.getLogger(ElasticsearchMappings.class); + private ElasticsearchMappings() { } @@ -964,4 +988,94 @@ public static XContentBuilder auditMessageMapping() throws IOException { .endObject() .endObject(); } + + static String[] mappingRequiresUpdate(ClusterState state, String[] concreteIndices, Version minVersion) throws IOException { + List indicesToUpdate = new ArrayList<>(); + + ImmutableOpenMap> currentMapping = state.metaData().findMappings(concreteIndices, + new String[] {DOC_TYPE}, 
MapperPlugin.NOOP_FIELD_FILTER); + + for (String index : concreteIndices) { + ImmutableOpenMap innerMap = currentMapping.get(index); + if (innerMap != null) { + MappingMetaData metaData = innerMap.get(DOC_TYPE); + try { + @SuppressWarnings("unchecked") + Map meta = (Map) metaData.sourceAsMap().get("_meta"); + if (meta != null) { + String versionString = (String) meta.get("version"); + if (versionString == null) { + logger.info("Version of mappings for [{}] not found, recreating", index); + indicesToUpdate.add(index); + continue; + } + + Version mappingVersion = Version.fromString(versionString); + + if (mappingVersion.onOrAfter(minVersion)) { + continue; + } else { + logger.info("Mappings for [{}] are outdated [{}], updating it[{}].", index, mappingVersion, Version.CURRENT); + indicesToUpdate.add(index); + continue; + } + } else { + logger.info("Version of mappings for [{}] not found, recreating", index); + indicesToUpdate.add(index); + continue; + } + } catch (Exception e) { + logger.error(new ParameterizedMessage("Failed to retrieve mapping version for [{}], recreating", index), e); + indicesToUpdate.add(index); + continue; + } + } else { + logger.info("No mappings found for [{}], recreating", index); + indicesToUpdate.add(index); + } + } + return indicesToUpdate.toArray(new String[indicesToUpdate.size()]); + } + + public static void addDocMappingIfMissing(String alias, CheckedSupplier mappingSupplier, + Client client, ClusterState state, ActionListener listener) { + AliasOrIndex aliasOrIndex = state.metaData().getAliasAndIndexLookup().get(alias); + if (aliasOrIndex == null) { + // The index has never been created yet + listener.onResponse(true); + return; + } + String[] concreteIndices = aliasOrIndex.getIndices().stream().map(IndexMetaData::getIndex).map(Index::getName) + .toArray(String[]::new); + + String[] indicesThatRequireAnUpdate; + try { + indicesThatRequireAnUpdate = mappingRequiresUpdate(state, concreteIndices, Version.CURRENT); + } catch (IOException e) { + listener.onFailure(e); + return; + } + + if (indicesThatRequireAnUpdate.length > 0) { + try (XContentBuilder mapping = mappingSupplier.get()) { + PutMappingRequest putMappingRequest = new PutMappingRequest(indicesThatRequireAnUpdate); + putMappingRequest.type(DOC_TYPE); + putMappingRequest.source(mapping); + executeAsyncWithOrigin(client, ML_ORIGIN, PutMappingAction.INSTANCE, putMappingRequest, + ActionListener.wrap(response -> { + if (response.isAcknowledged()) { + listener.onResponse(true); + } else { + listener.onFailure(new ElasticsearchException("Attempt to put missing mapping in indices " + + Arrays.toString(indicesThatRequireAnUpdate) + " was not acknowledged")); + } + }, listener::onFailure)); + } catch (IOException e) { + listener.onFailure(e); + } + } else { + logger.trace("Mappings are up to date."); + listener.onResponse(true); + } + } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java index e4ce536a3ccf6..e87515afadd1d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/job/persistence/ElasticsearchMappingsTests.java @@ -9,10 +9,18 @@ import com.fasterxml.jackson.core.JsonParseException; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.JsonToken; 
+import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MappingMetaData; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.config.ModelPlotConfig; @@ -30,6 +38,8 @@ import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -128,6 +138,96 @@ public void testTermFieldMapping() throws IOException { assertNull(instanceMapping); } + + public void testMappingRequiresUpdateNoMapping() throws IOException { + ClusterState.Builder csBuilder = ClusterState.builder(new ClusterName("_name")); + ClusterState cs = csBuilder.build(); + String[] indices = new String[] { "no_index" }; + + assertArrayEquals(new String[] { "no_index" }, ElasticsearchMappings.mappingRequiresUpdate(cs, indices, Version.CURRENT)); + } + + public void testMappingRequiresUpdateNullMapping() throws IOException { + ClusterState cs = getClusterStateWithMappingsWithMetaData(Collections.singletonMap("null_mapping", null)); + String[] indices = new String[] { "null_index" }; + assertArrayEquals(indices, ElasticsearchMappings.mappingRequiresUpdate(cs, indices, Version.CURRENT)); + } + + public void testMappingRequiresUpdateNoVersion() throws IOException { + ClusterState cs = getClusterStateWithMappingsWithMetaData(Collections.singletonMap("no_version_field", "NO_VERSION_FIELD")); + String[] indices = new String[] { "no_version_field" }; + assertArrayEquals(indices, ElasticsearchMappings.mappingRequiresUpdate(cs, indices, Version.CURRENT)); + } + + public void testMappingRequiresUpdateRecentMappingVersion() throws IOException { + ClusterState cs = getClusterStateWithMappingsWithMetaData(Collections.singletonMap("version_current", Version.CURRENT.toString())); + String[] indices = new String[] { "version_current" }; + assertArrayEquals(new String[] {}, ElasticsearchMappings.mappingRequiresUpdate(cs, indices, Version.CURRENT)); + } + + public void testMappingRequiresUpdateMaliciousMappingVersion() throws IOException { + ClusterState cs = getClusterStateWithMappingsWithMetaData( + Collections.singletonMap("version_current", Collections.singletonMap("nested", "1.0"))); + String[] indices = new String[] { "version_nested" }; + assertArrayEquals(indices, ElasticsearchMappings.mappingRequiresUpdate(cs, indices, Version.CURRENT)); + } + + public void testMappingRequiresUpdateBogusMappingVersion() throws IOException { + ClusterState cs = getClusterStateWithMappingsWithMetaData(Collections.singletonMap("version_bogus", "0.0")); + String[] indices = new String[] { "version_bogus" }; + assertArrayEquals(indices, ElasticsearchMappings.mappingRequiresUpdate(cs, indices, Version.CURRENT)); + } + + public void testMappingRequiresUpdateNewerMappingVersion() throws IOException { + ClusterState cs = 
getClusterStateWithMappingsWithMetaData(Collections.singletonMap("version_newer", Version.CURRENT)); + String[] indices = new String[] { "version_newer" }; + assertArrayEquals(new String[] {}, ElasticsearchMappings.mappingRequiresUpdate(cs, indices, VersionUtils.getPreviousVersion())); + } + + public void testMappingRequiresUpdateNewerMappingVersionMinor() throws IOException { + ClusterState cs = getClusterStateWithMappingsWithMetaData(Collections.singletonMap("version_newer_minor", Version.CURRENT)); + String[] indices = new String[] { "version_newer_minor" }; + assertArrayEquals(new String[] {}, + ElasticsearchMappings.mappingRequiresUpdate(cs, indices, VersionUtils.getPreviousMinorVersion())); + } + + + private ClusterState getClusterStateWithMappingsWithMetaData(Map<String, Object> namesAndVersions) throws IOException { + MetaData.Builder metaDataBuilder = MetaData.builder(); + + for (Map.Entry<String, Object> entry : namesAndVersions.entrySet()) { + + String indexName = entry.getKey(); + Object version = entry.getValue(); + + IndexMetaData.Builder indexMetaData = IndexMetaData.builder(indexName); + indexMetaData.settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); + + Map<String, Object> mapping = new HashMap<>(); + Map<String, Object> properties = new HashMap<>(); + for (int i = 0; i < 10; i++) { + properties.put("field" + i, Collections.singletonMap("type", "string")); + } + mapping.put("properties", properties); + + Map<String, Object> meta = new HashMap<>(); + if (version != null && version.equals("NO_VERSION_FIELD") == false) { + meta.put("version", version); + } + mapping.put("_meta", meta); + + indexMetaData.putMapping(new MappingMetaData(ElasticsearchMappings.DOC_TYPE, mapping)); + + metaDataBuilder.put(indexMetaData); + } + MetaData metaData = metaDataBuilder.build(); + + ClusterState.Builder csBuilder = ClusterState.builder(new ClusterName("_name")); + csBuilder.metaData(metaData); + return csBuilder.build(); + } + private Set<String> collectResultsDocFieldNames() throws IOException { // Only the mappings for the results index should be added below. Do NOT add mappings for other indexes here.
return collectFieldNames(ElasticsearchMappings.resultsMapping()); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java index 2da89c359e793..a5aed9b5b5957 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java @@ -7,14 +7,10 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ResourceAlreadyExistsException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedResponse; @@ -23,21 +19,14 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.metadata.AliasOrIndex; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.Index; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.persistent.AllocatedPersistentTask; @@ -45,7 +34,6 @@ import org.elasticsearch.persistent.PersistentTasksCustomMetaData; import org.elasticsearch.persistent.PersistentTasksExecutor; import org.elasticsearch.persistent.PersistentTasksService; -import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; @@ -69,9 +57,7 @@ import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; import org.elasticsearch.xpack.ml.process.MlMemoryTracker; -import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.LinkedList; import java.util.List; @@ -405,54 +391,6 @@ private static boolean jobHasRules(Job job) { return job.getAnalysisConfig().getDetectors().stream().anyMatch(d -> d.getRules().isEmpty() == false); } - static String[] mappingRequiresUpdate(ClusterState state, String[] concreteIndices, Version minVersion, - Logger logger) throws IOException { - List indicesToUpdate = new ArrayList<>(); - - ImmutableOpenMap> currentMapping = 
state.metaData().findMappings(concreteIndices, - new String[] { ElasticsearchMappings.DOC_TYPE }, MapperPlugin.NOOP_FIELD_FILTER); - - for (String index : concreteIndices) { - ImmutableOpenMap innerMap = currentMapping.get(index); - if (innerMap != null) { - MappingMetaData metaData = innerMap.get(ElasticsearchMappings.DOC_TYPE); - try { - Map meta = (Map) metaData.sourceAsMap().get("_meta"); - if (meta != null) { - String versionString = (String) meta.get("version"); - if (versionString == null) { - logger.info("Version of mappings for [{}] not found, recreating", index); - indicesToUpdate.add(index); - continue; - } - - Version mappingVersion = Version.fromString(versionString); - - if (mappingVersion.onOrAfter(minVersion)) { - continue; - } else { - logger.info("Mappings for [{}] are outdated [{}], updating it[{}].", index, mappingVersion, Version.CURRENT); - indicesToUpdate.add(index); - continue; - } - } else { - logger.info("Version of mappings for [{}] not found, recreating", index); - indicesToUpdate.add(index); - continue; - } - } catch (Exception e) { - logger.error(new ParameterizedMessage("Failed to retrieve mapping version for [{}], recreating", index), e); - indicesToUpdate.add(index); - continue; - } - } else { - logger.info("No mappings found for [{}], recreating", index); - indicesToUpdate.add(index); - } - } - return indicesToUpdate.toArray(new String[indicesToUpdate.size()]); - } - @Override protected String executor() { // This api doesn't do heavy or blocking operations (just delegates PersistentTasksService), @@ -527,25 +465,18 @@ public void onFailure(Exception e) { ); // Try adding state doc mapping - ActionListener resultsPutMappingHandler = ActionListener.wrap( + ActionListener getJobHandler = ActionListener.wrap( response -> { - addDocMappingIfMissing(AnomalyDetectorsIndex.jobStateIndexWriteAlias(), ElasticsearchMappings::stateMapping, - state, jobUpdateListener); + ElasticsearchMappings.addDocMappingIfMissing(AnomalyDetectorsIndex.jobStateIndexWriteAlias(), + ElasticsearchMappings::stateMapping, client, state, jobUpdateListener); }, listener::onFailure ); // Get the job config jobConfigProvider.getJob(jobParams.getJobId(), ActionListener.wrap( builder -> { - try { - jobParams.setJob(builder.build()); - - // Try adding results doc mapping - addDocMappingIfMissing(AnomalyDetectorsIndex.jobResultsAliasedName(jobParams.getJobId()), - ElasticsearchMappings::resultsMapping, state, resultsPutMappingHandler); - } catch (Exception e) { - listener.onFailure(e); - } + jobParams.setJob(builder.build()); + getJobHandler.onResponse(null); }, listener::onFailure )); @@ -620,48 +551,6 @@ public void onFailure(Exception e) { ); } - private void addDocMappingIfMissing(String alias, CheckedSupplier mappingSupplier, ClusterState state, - ActionListener listener) { - AliasOrIndex aliasOrIndex = state.metaData().getAliasAndIndexLookup().get(alias); - if (aliasOrIndex == null) { - // The index has never been created yet - listener.onResponse(true); - return; - } - String[] concreteIndices = aliasOrIndex.getIndices().stream().map(IndexMetaData::getIndex).map(Index::getName) - .toArray(String[]::new); - - String[] indicesThatRequireAnUpdate; - try { - indicesThatRequireAnUpdate = mappingRequiresUpdate(state, concreteIndices, Version.CURRENT, logger); - } catch (IOException e) { - listener.onFailure(e); - return; - } - - if (indicesThatRequireAnUpdate.length > 0) { - try (XContentBuilder mapping = mappingSupplier.get()) { - PutMappingRequest putMappingRequest = new 
PutMappingRequest(indicesThatRequireAnUpdate); - putMappingRequest.type(ElasticsearchMappings.DOC_TYPE); - putMappingRequest.source(mapping); - executeAsyncWithOrigin(client, ML_ORIGIN, PutMappingAction.INSTANCE, putMappingRequest, - ActionListener.wrap(response -> { - if (response.isAcknowledged()) { - listener.onResponse(true); - } else { - listener.onFailure(new ElasticsearchException("Attempt to put missing mapping in indices " - + Arrays.toString(indicesThatRequireAnUpdate) + " was not acknowledged")); - } - }, listener::onFailure)); - } catch (IOException e) { - listener.onFailure(e); - } - } else { - logger.trace("Mappings are uptodate."); - listener.onResponse(true); - } - } - public static class OpenJobPersistentTasksExecutor extends PersistentTasksExecutor { private static final Logger logger = LogManager.getLogger(OpenJobPersistentTasksExecutor.class); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java index 9695d73ed05c5..dd3656ee04b67 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManager.java @@ -40,6 +40,7 @@ import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.ml.job.config.MlFilter; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; +import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; import org.elasticsearch.xpack.core.ml.job.process.autodetect.output.FlushAcknowledgement; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.DataCounts; import org.elasticsearch.xpack.core.ml.job.process.autodetect.state.ModelSizeStats; @@ -417,7 +418,9 @@ public void onFailure(Exception e) { public void openJob(JobTask jobTask, ClusterState clusterState, Consumer closeHandler) { String jobId = jobTask.getJobId(); logger.info("Opening job [{}]", jobId); - AnomalyDetectorsIndex.createStateIndexAndAliasIfNecessary(client, clusterState, ActionListener.wrap( + + // Start the process + ActionListener stateAliasHandler = ActionListener.wrap( r -> { jobManager.getJob(jobId, ActionListener.wrap( job -> { @@ -427,7 +430,6 @@ public void openJob(JobTask jobTask, ClusterState clusterState, Consumer { // We need to fork, otherwise we restore model state from a network thread (several GET api calls): @@ -477,7 +479,17 @@ protected void doRun() { closeHandler )); }, - closeHandler)); + closeHandler); + + // Make sure the state index and alias exist + ActionListener resultsMappingUpdateHandler = ActionListener.wrap( + ack -> AnomalyDetectorsIndex.createStateIndexAndAliasIfNecessary(client, clusterState, stateAliasHandler), + closeHandler + ); + + // Try adding the results doc mapping - this updates to the latest version if an old mapping is present + ElasticsearchMappings.addDocMappingIfMissing(AnomalyDetectorsIndex.jobResultsAliasedName(jobId), + ElasticsearchMappings::resultsMapping, client, clusterState, resultsMappingUpdateHandler); } private void createProcessAndSetRunning(ProcessContext processContext, Job job, AutodetectParams params, Consumer handler) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java index 9bd32bdc9eff3..da54b33d27597 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportOpenJobActionTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.cluster.metadata.AliasMetaData; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -38,7 +37,6 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.VersionUtils; import org.elasticsearch.xpack.core.ml.MlMetaIndex; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.OpenJobAction; @@ -53,7 +51,6 @@ import org.elasticsearch.xpack.core.ml.job.config.RuleCondition; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndexFields; -import org.elasticsearch.xpack.core.ml.job.persistence.ElasticsearchMappings; import org.elasticsearch.xpack.core.ml.notifications.AuditorField; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; @@ -61,7 +58,6 @@ import org.elasticsearch.xpack.ml.support.BaseMlIntegTestCase; import org.junit.Before; -import java.io.IOException; import java.net.InetAddress; import java.util.ArrayList; import java.util.Arrays; @@ -486,59 +482,6 @@ public void testVerifyIndicesPrimaryShardsAreActive() { assertEquals(indexToRemove, result.get(0)); } - public void testMappingRequiresUpdateNoMapping() throws IOException { - ClusterState.Builder csBuilder = ClusterState.builder(new ClusterName("_name")); - ClusterState cs = csBuilder.build(); - String[] indices = new String[] { "no_index" }; - - assertArrayEquals(new String[] { "no_index" }, TransportOpenJobAction.mappingRequiresUpdate(cs, indices, Version.CURRENT, logger)); - } - - public void testMappingRequiresUpdateNullMapping() throws IOException { - ClusterState cs = getClusterStateWithMappingsWithMetaData(Collections.singletonMap("null_mapping", null)); - String[] indices = new String[] { "null_index" }; - assertArrayEquals(indices, TransportOpenJobAction.mappingRequiresUpdate(cs, indices, Version.CURRENT, logger)); - } - - public void testMappingRequiresUpdateNoVersion() throws IOException { - ClusterState cs = getClusterStateWithMappingsWithMetaData(Collections.singletonMap("no_version_field", "NO_VERSION_FIELD")); - String[] indices = new String[] { "no_version_field" }; - assertArrayEquals(indices, TransportOpenJobAction.mappingRequiresUpdate(cs, indices, Version.CURRENT, logger)); - } - - public void testMappingRequiresUpdateRecentMappingVersion() throws IOException { - ClusterState cs = getClusterStateWithMappingsWithMetaData(Collections.singletonMap("version_current", Version.CURRENT.toString())); - String[] indices = new String[] { "version_current" }; - assertArrayEquals(new String[] {}, TransportOpenJobAction.mappingRequiresUpdate(cs, indices, Version.CURRENT, logger)); - } - - public void 
testMappingRequiresUpdateMaliciousMappingVersion() throws IOException { - ClusterState cs = getClusterStateWithMappingsWithMetaData( - Collections.singletonMap("version_current", Collections.singletonMap("nested", "1.0"))); - String[] indices = new String[] { "version_nested" }; - assertArrayEquals(indices, TransportOpenJobAction.mappingRequiresUpdate(cs, indices, Version.CURRENT, logger)); - } - - public void testMappingRequiresUpdateBogusMappingVersion() throws IOException { - ClusterState cs = getClusterStateWithMappingsWithMetaData(Collections.singletonMap("version_bogus", "0.0")); - String[] indices = new String[] { "version_bogus" }; - assertArrayEquals(indices, TransportOpenJobAction.mappingRequiresUpdate(cs, indices, Version.CURRENT, logger)); - } - - public void testMappingRequiresUpdateNewerMappingVersion() throws IOException { - ClusterState cs = getClusterStateWithMappingsWithMetaData(Collections.singletonMap("version_newer", Version.CURRENT)); - String[] indices = new String[] { "version_newer" }; - assertArrayEquals(new String[] {}, TransportOpenJobAction.mappingRequiresUpdate(cs, indices, VersionUtils.getPreviousVersion(), - logger)); - } - - public void testMappingRequiresUpdateNewerMappingVersionMinor() throws IOException { - ClusterState cs = getClusterStateWithMappingsWithMetaData(Collections.singletonMap("version_newer_minor", Version.CURRENT)); - String[] indices = new String[] { "version_newer_minor" }; - assertArrayEquals(new String[] {}, - TransportOpenJobAction.mappingRequiresUpdate(cs, indices, VersionUtils.getPreviousMinorVersion(), logger)); - } - public void testNodeNameAndVersion() { TransportAddress ta = new TransportAddress(InetAddress.getLoopbackAddress(), 9300); Map attributes = new HashMap<>(); @@ -641,42 +584,6 @@ private void addIndices(MetaData.Builder metaData, RoutingTable.Builder routingT } } - private ClusterState getClusterStateWithMappingsWithMetaData(Map namesAndVersions) throws IOException { - MetaData.Builder metaDataBuilder = MetaData.builder(); - - for (Map.Entry entry : namesAndVersions.entrySet()) { - - String indexName = entry.getKey(); - Object version = entry.getValue(); - - IndexMetaData.Builder indexMetaData = IndexMetaData.builder(indexName); - indexMetaData.settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)); - - Map mapping = new HashMap<>(); - Map properties = new HashMap<>(); - for (int i = 0; i < 10; i++) { - properties.put("field" + i, Collections.singletonMap("type", "string")); - } - mapping.put("properties", properties); - - Map meta = new HashMap<>(); - if (version != null && version.equals("NO_VERSION_FIELD") == false) { - meta.put("version", version); - } - mapping.put("_meta", meta); - - indexMetaData.putMapping(new MappingMetaData(ElasticsearchMappings.DOC_TYPE, mapping)); - - metaDataBuilder.put(indexMetaData); - } - MetaData metaData = metaDataBuilder.build(); - - ClusterState.Builder csBuilder = ClusterState.builder(new ClusterName("_name")); - csBuilder.metaData(metaData); - return csBuilder.build(); - } - private static Job jobWithRules(String jobId) { DetectionRule rule = new DetectionRule.Builder(Collections.singletonList( new RuleCondition(RuleCondition.AppliesTo.TYPICAL, Operator.LT, 100.0) diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java index 9024d0edcee9c..ba319f1a90781 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectProcessManagerTests.java @@ -141,6 +141,7 @@ public void setup() throws Exception { when(metaData.getAliasAndIndexLookup()).thenReturn(aliasOrIndexSortedMap); clusterState = mock(ClusterState.class); when(clusterState.getMetaData()).thenReturn(metaData); + when(clusterState.metaData()).thenReturn(metaData); doAnswer(invocationOnMock -> { @SuppressWarnings("unchecked") diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlMappingsUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlMappingsUpgradeIT.java new file mode 100644 index 0000000000000..5602f14ef2267 --- /dev/null +++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlMappingsUpgradeIT.java @@ -0,0 +1,101 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.upgrades; + +import org.elasticsearch.Version; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ml.job.config.AnalysisConfig; +import org.elasticsearch.client.ml.job.config.DataDescription; +import org.elasticsearch.client.ml.job.config.Detector; +import org.elasticsearch.client.ml.job.config.Job; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.xpack.core.ml.job.persistence.AnomalyDetectorsIndex; +import org.elasticsearch.xpack.test.rest.XPackRestTestHelper; + +import java.io.IOException; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +public class MlMappingsUpgradeIT extends AbstractUpgradeTestCase { + + private static final String JOB_ID = "ml-mappings-upgrade-job"; + + @Override + protected Collection<String> templatesToWaitFor() { + return Stream.concat(XPackRestTestHelper.ML_POST_V660_TEMPLATES.stream(), + super.templatesToWaitFor().stream()).collect(Collectors.toSet()); + } + + /** + * The purpose of this test is to ensure that when a job remains open through a rolling upgrade, the results + * index mappings are upgraded once the job is assigned to an upgraded node, even if no other ML endpoint is called + * after the upgrade. + */ + public void testMappingsUpgrade() throws Exception { + + switch (CLUSTER_TYPE) { + case OLD: + createAndOpenTestJob(); + break; + case MIXED: + // We don't know whether the job is on an old or upgraded node, so cannot assert that the mappings have been upgraded + break; + case UPGRADED: + assertUpgradedMappings(); + break; + default: + throw new UnsupportedOperationException("Unknown cluster type [" + CLUSTER_TYPE + "]"); + } + } + + private void createAndOpenTestJob() throws IOException { + + Detector.Builder d = new Detector.Builder("metric", "responsetime"); + d.setByFieldName("airline"); + AnalysisConfig.Builder analysisConfig = new AnalysisConfig.Builder(Collections.singletonList(d.build())); +
analysisConfig.setBucketSpan(TimeValue.timeValueMinutes(10)); + Job.Builder job = new Job.Builder(JOB_ID); + job.setAnalysisConfig(analysisConfig); + job.setDataDescription(new DataDescription.Builder()); + + Request putJob = new Request("PUT", "_ml/anomaly_detectors/" + JOB_ID); + putJob.setJsonEntity(Strings.toString(job.build())); + Response response = client().performRequest(putJob); + assertEquals(200, response.getStatusLine().getStatusCode()); + + Request openJob = new Request("POST", "_ml/anomaly_detectors/" + JOB_ID + "/_open"); + response = client().performRequest(openJob); + assertEquals(200, response.getStatusLine().getStatusCode()); + } + + @SuppressWarnings("unchecked") + private void assertUpgradedMappings() throws Exception { + + assertBusy(() -> { + Request getMappings = new Request("GET", AnomalyDetectorsIndex.resultsWriteAlias(JOB_ID) + "/_mappings"); + Response response = client().performRequest(getMappings); + + Map<String, Object> responseLevel = entityAsMap(response); + assertNotNull(responseLevel); + Map<String, Object> indexLevel = (Map<String, Object>) responseLevel.get(".ml-anomalies-shared"); + assertNotNull(indexLevel); + Map<String, Object> mappingsLevel = (Map<String, Object>) indexLevel.get("mappings"); + assertNotNull(mappingsLevel); + Map<String, Object> metaLevel = (Map<String, Object>) mappingsLevel.get("_meta"); + assertEquals(Collections.singletonMap("version", Version.CURRENT.toString()), metaLevel); + Map<String, Object> propertiesLevel = (Map<String, Object>) mappingsLevel.get("properties"); + assertNotNull(propertiesLevel); + // TODO: as the years go by, the field we assert on here should be changed + // to the most recent field we've added that is NOT of type "keyword" + Map<String, Object> fieldLevel = (Map<String, Object>) propertiesLevel.get("multi_bucket_impact"); + assertEquals(Collections.singletonMap("type", "double"), fieldLevel); + }); + } +} From daa2ec8a605d385a65b9ab3e89d016b3fd0dffe2 Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Wed, 23 Jan 2019 10:40:05 +0100 Subject: [PATCH 30/39] Switch mapping/aggregations over to java time (#36363) This commit moves the aggregation and mapping code from joda time to java time. This includes field mappers, root object mappers, aggregations with date histograms, query builders and a lot of changes within tests. The cut-over to java time is a requirement so that we can support nanoseconds properly in a future field mapper.
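Editor's note: in practice the cut-over replaces Joda formatters at call sites with the java.time-backed DateFormatter abstraction this patch touches throughout. A minimal sketch of the new call pattern, assuming the DateFormatter interface as it appears in the server diff below (the pattern name and printed output are illustrative):

    import java.time.ZoneOffset;
    import java.time.temporal.TemporalAccessor;
    import org.elasticsearch.common.time.DateFormatter;

    public class JavaTimeCutoverSketch {
        public static void main(String[] args) {
            // Look up a named pattern; the 6.x "8"-prefix convention is now tolerated and stripped.
            DateFormatter formatter = DateFormatter.forPattern("strict_date_optional_time")
                    .withZone(ZoneOffset.UTC);
            // Epoch millis are printed through java.time rather than Joda.
            String formatted = formatter.formatMillis(0L); // e.g. 1970-01-01T00:00:00.000Z
            // Parsing yields a java.time TemporalAccessor instead of a Joda DateTime.
            TemporalAccessor parsed = formatter.parse("2019-01-23T10:40:05.123Z");
            System.out.println(formatted + " / " + parsed);
        }
    }
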
Relates #27330 --- .../time/DateFormatterBenchmark.java | 1 - .../index/mapper/DenseVectorFieldMapper.java | 4 +- .../index/mapper/ScaledFloatFieldMapper.java | 4 +- .../index/mapper/SparseVectorFieldMapper.java | 4 +- .../ICUCollationKeywordFieldMapper.java | 4 +- .../60_pipeline_timestamp_date_mapping.yml | 2 +- ...g.yml => 180_locale_dependent_mapping.yml} | 0 .../cluster/metadata/IndexGraveyard.java | 3 +- .../metadata/IndexNameExpressionResolver.java | 13 +- .../cluster/routing/UnassignedInfo.java | 3 +- .../org/elasticsearch/common/Rounding.java | 20 +- .../java/org/elasticsearch/common/Table.java | 3 +- .../common/io/stream/StreamInput.java | 17 + .../common/io/stream/StreamOutput.java | 21 +- .../common/joda/JodaDateFormatter.java | 21 +- .../common/joda/JodaDateMathParser.java | 7 +- .../common/time/DateFormatter.java | 17 +- .../common/time/DateFormatters.java | 149 +++- .../common/time/DateMathParser.java | 9 +- .../elasticsearch/common/time/DateUtils.java | 10 +- .../elasticsearch/common/time/EpochTime.java | 26 +- .../common/time/JavaDateFormatter.java | 18 +- .../common/time/JavaDateMathParser.java | 32 +- .../XContentElasticsearchExtension.java | 7 +- .../index/mapper/BinaryFieldMapper.java | 5 +- .../index/mapper/BooleanFieldMapper.java | 4 +- .../index/mapper/DateFieldMapper.java | 85 +- .../index/mapper/DocumentParser.java | 12 +- .../index/mapper/IpFieldMapper.java | 4 +- .../index/mapper/MappedFieldType.java | 14 +- .../index/mapper/NumberFieldMapper.java | 4 +- .../index/mapper/RangeFieldMapper.java | 53 +- .../index/mapper/RootObjectMapper.java | 6 +- .../index/mapper/SimpleMappedFieldType.java | 7 +- .../index/mapper/TypeParsers.java | 1 - .../index/query/QueryStringQueryBuilder.java | 22 +- .../index/query/RangeQueryBuilder.java | 65 +- .../index/search/QueryStringQueryParser.java | 6 +- .../elasticsearch/ingest/IngestDocument.java | 6 +- .../elasticsearch/monitor/jvm/HotThreads.java | 3 +- .../rest/action/cat/RestIndicesAction.java | 5 +- .../rest/action/cat/RestSnapshotAction.java | 3 +- .../rest/action/cat/RestTasksAction.java | 3 +- .../script/JodaCompatibleZonedDateTime.java | 3 +- .../script/ScoreScriptUtils.java | 6 +- .../elasticsearch/search/DocValueFormat.java | 31 +- .../DateHistogramValuesSourceBuilder.java | 34 +- .../composite/RoundingValuesSource.java | 2 +- .../AutoDateHistogramAggregationBuilder.java | 29 +- .../AutoDateHistogramAggregator.java | 2 +- .../DateHistogramAggregationBuilder.java | 89 +- .../histogram/DateHistogramAggregator.java | 2 +- .../DateHistogramAggregatorFactory.java | 2 +- .../bucket/histogram/ExtendedBounds.java | 2 +- .../histogram/InternalAutoDateHistogram.java | 8 +- .../histogram/InternalDateHistogram.java | 12 +- .../histogram/ParsedAutoDateHistogram.java | 6 +- .../bucket/histogram/ParsedDateHistogram.java | 6 +- .../range/DateRangeAggregationBuilder.java | 24 +- .../bucket/range/InternalDateRange.java | 10 +- .../bucket/range/ParsedDateRange.java | 9 +- .../DerivativePipelineAggregationBuilder.java | 7 +- .../support/MultiValuesSourceFieldConfig.java | 35 +- .../aggregations/support/ValueType.java | 4 +- .../ValuesSourceAggregationBuilder.java | 24 +- .../support/ValuesSourceConfig.java | 18 +- .../support/ValuesSourceParserHelper.java | 8 +- .../elasticsearch/snapshots/SnapshotInfo.java | 3 +- .../admin/indices/rollover/RolloverIT.java | 14 +- .../DateMathExpressionResolverTests.java | 14 +- .../elasticsearch/common/RoundingTests.java | 5 +- .../joda/JavaJodaTimeDuellingTests.java | 94 +- 
.../common/joda/JodaDateMathParserTests.java | 19 +- .../elasticsearch/common/joda/JodaTests.java | 29 +- .../common/joda/SimpleJodaTests.java | 800 ------------------ .../common/rounding/RoundingDuelTests.java | 3 + .../common/time/DateFormattersTests.java | 59 +- .../common/time/JavaDateMathParserTests.java | 30 +- .../index/mapper/DateFieldMapperTests.java | 64 +- .../index/mapper/DateFieldTypeTests.java | 37 +- .../index/mapper/DynamicMappingTests.java | 3 +- .../index/mapper/DynamicTemplatesTests.java | 2 - .../index/mapper/RangeFieldMapperTests.java | 2 +- ...angeFieldQueryStringQueryBuilderTests.java | 7 +- .../index/mapper/RangeFieldTypeTests.java | 11 +- .../query/QueryStringQueryBuilderTests.java | 15 +- .../index/query/RangeQueryBuilderTests.java | 18 +- .../indices/IndicesRequestCacheIT.java | 26 +- .../search/DocValueFormatTests.java | 8 +- .../bucket/AutoDateHistogramTests.java | 2 +- .../aggregations/bucket/DateHistogramIT.java | 344 ++++---- .../bucket/DateHistogramOffsetIT.java | 41 +- .../aggregations/bucket/DateRangeIT.java | 262 +++--- .../aggregations/bucket/DateRangeTests.java | 2 +- .../aggregations/bucket/MinDocCountIT.java | 5 +- .../CompositeAggregationBuilderTests.java | 2 +- .../composite/CompositeAggregatorTests.java | 12 +- .../composite/InternalCompositeTests.java | 4 +- .../AutoDateHistogramAggregatorTests.java | 256 +++--- .../DateHistogramAggregatorTests.java | 3 +- .../bucket/histogram/DateHistogramTests.java | 13 +- .../bucket/histogram/ExtendedBoundsTests.java | 24 +- .../InternalAutoDateHistogramTests.java | 26 +- .../histogram/InternalDateHistogramTests.java | 8 +- .../metrics/WeightedAvgAggregatorTests.java | 6 +- .../pipeline/AvgBucketAggregatorTests.java | 7 +- .../aggregations/pipeline/BucketSortIT.java | 8 +- .../CumulativeSumAggregatorTests.java | 3 +- .../pipeline/DateDerivativeIT.java | 203 +++-- .../aggregations/pipeline/MovFnUnitTests.java | 3 +- .../PipelineAggregationHelperTests.java | 2 +- .../MultiValuesSourceFieldConfigTests.java | 4 +- .../highlight/HighlighterSearchIT.java | 10 +- .../search/fields/SearchFieldsIT.java | 14 +- .../search/query/SearchQueryIT.java | 78 +- .../test/AbstractSerializingTestCase.java | 7 + .../license/licensor/TestUtils.java | 10 +- .../org/elasticsearch/license/DateUtils.java | 28 +- .../ml/action/GetOverallBucketsAction.java | 2 +- .../core/ml/action/StartDatafeedAction.java | 2 +- .../ml/datafeed/extractor/ExtractorUtils.java | 10 +- .../xpack/core/ml/utils/time/TimeUtils.java | 4 +- .../rollup/job/DateHistogramGroupConfig.java | 20 +- .../watcher/support/WatcherDateTimeUtils.java | 2 +- .../RewriteCachingDirectoryReaderTests.java | 8 +- .../org/elasticsearch/license/TestUtils.java | 5 +- .../core/ml/datafeed/DatafeedConfigTests.java | 10 +- .../extractor/ExtractorUtilsTests.java | 15 +- .../xpack/core/rollup/ConfigTestHelpers.java | 4 +- ...eHistogramGroupConfigSerializingTests.java | 12 +- .../ml/transforms/PainlessDomainSplitIT.java | 18 +- .../xpack/ml/MlDailyMaintenanceService.java | 15 +- .../xpack/ml/datafeed/DatafeedJob.java | 1 - .../DatafeedDelayedDataDetector.java | 8 +- .../AggregationToJsonProcessor.java | 10 +- .../extractor/fields/ExtractedField.java | 3 - .../OverallBucketsProvider.java | 6 +- .../AbstractExpiredJobDataRemover.java | 6 +- .../retention/ExpiredForecastsRemover.java | 6 +- .../extractor/fields/ExtractedFieldTests.java | 10 +- .../fields/TimeBasedExtractedFieldsTests.java | 8 - .../TimestampFormatFinderTests.java | 4 +- .../AutodetectResultProcessorIT.java | 15 +- 
.../ml/job/results/AutodetectResultTests.java | 21 +- .../xpack/ml/job/results/BucketTests.java | 4 +- .../xpack/ml/job/results/ForecastTests.java | 2 +- .../xpack/ml/job/results/ModelPlotTests.java | 8 +- .../ml/job/results/OverallBucketTests.java | 4 +- .../xpack/monitoring/MonitoringTestUtils.java | 3 +- .../local/LocalExporterIntegTests.java | 2 +- .../rollup/RollupJobIdentifierUtils.java | 17 +- .../xpack/rollup/job/RollupIndexer.java | 5 +- .../rollup/RollupJobIdentifierUtilTests.java | 5 +- .../rollup/action/SearchActionTests.java | 2 +- .../xpack/rollup/config/ConfigTests.java | 5 +- .../job/RollupIndexerIndexingTests.java | 61 +- .../querydsl/agg/GroupByDateHistogram.java | 3 +- 157 files changed, 1823 insertions(+), 2249 deletions(-) rename rest-api-spec/src/main/resources/rest-api-spec/test/search/{180_local_dependent_mapping.yml => 180_locale_dependent_mapping.yml} (100%) delete mode 100644 server/src/test/java/org/elasticsearch/common/joda/SimpleJodaTests.java diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/time/DateFormatterBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/time/DateFormatterBenchmark.java index b30b3ada0ab64..a364a331400a5 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/time/DateFormatterBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/time/DateFormatterBenchmark.java @@ -55,4 +55,3 @@ public TemporalAccessor parseJodaDate() { return jodaFormatter.parse("1234567890"); } } - diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/DenseVectorFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/DenseVectorFieldMapper.java index fdcb1f54ea7dd..7beddc13ca598 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/DenseVectorFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/DenseVectorFieldMapper.java @@ -31,9 +31,9 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.DocValueFormat; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; import java.util.List; import java.util.Map; @@ -107,7 +107,7 @@ public String typeName() { } @Override - public DocValueFormat docValueFormat(String format, DateTimeZone timeZone) { + public DocValueFormat docValueFormat(String format, ZoneId timeZone) { throw new UnsupportedOperationException( "Field [" + name() + "] of type [" + typeName() + "] doesn't support docvalue_fields or aggregations"); } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java index d3719ec884fa1..38d635ab3939f 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/ScaledFloatFieldMapper.java @@ -59,10 +59,10 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.MultiValueMode; -import org.joda.time.DateTimeZone; import java.io.IOException; import java.math.BigDecimal; +import java.time.ZoneId; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; @@ -301,7 +301,7 @@ public Object valueForDisplay(Object value) { } @Override - public 
DocValueFormat docValueFormat(String format, DateTimeZone timeZone) { + public DocValueFormat docValueFormat(String format, ZoneId timeZone) { if (timeZone != null) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] does not support custom time zones"); diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SparseVectorFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SparseVectorFieldMapper.java index 2eb360255d070..f7288d5039390 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SparseVectorFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SparseVectorFieldMapper.java @@ -31,9 +31,9 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.DocValueFormat; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; import java.util.List; import java.util.Map; @@ -107,7 +107,7 @@ public String typeName() { } @Override - public DocValueFormat docValueFormat(String format, DateTimeZone timeZone) { + public DocValueFormat docValueFormat(String format, ZoneId timeZone) { throw new UnsupportedOperationException( "Field [" + name() + "] of type [" + typeName() + "] doesn't support docvalue_fields or aggregations"); } diff --git a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java index d4cc9ee9d6e89..a228283527d66 100644 --- a/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java +++ b/plugins/analysis-icu/src/main/java/org/elasticsearch/index/mapper/ICUCollationKeywordFieldMapper.java @@ -46,9 +46,9 @@ import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.DocValueFormat; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; import java.util.ArrayList; import java.util.Iterator; import java.util.List; @@ -208,7 +208,7 @@ public BytesRef parseBytesRef(String value) { }; @Override - public DocValueFormat docValueFormat(final String format, final DateTimeZone timeZone) { + public DocValueFormat docValueFormat(final String format, final ZoneId timeZone) { return COLLATE_FORMAT; } } diff --git a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/60_pipeline_timestamp_date_mapping.yml b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/60_pipeline_timestamp_date_mapping.yml index 0f8b5517dd4d2..ea0984ef3bcbf 100644 --- a/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/60_pipeline_timestamp_date_mapping.yml +++ b/qa/smoke-test-ingest-with-all-dependencies/src/test/resources/rest-api-spec/test/ingest/60_pipeline_timestamp_date_mapping.yml @@ -9,7 +9,7 @@ index: timetest body: mappings: - test: { "properties": { "my_time": {"type": "date"}}} + test: { "properties": { "my_time": {"type": "date", "format": "strict_date_optional_time_nanos"}}} - do: ingest.put_pipeline: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/180_local_dependent_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/180_locale_dependent_mapping.yml 
similarity index 100% rename from rest-api-spec/src/main/resources/rest-api-spec/test/search/180_local_dependent_mapping.yml rename to rest-api-spec/src/main/resources/rest-api-spec/test/search/180_locale_dependent_mapping.yml diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java index 04e372a5f91de..ce93b27d770c1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java @@ -29,7 +29,6 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; -import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.xcontent.ContextParser; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; @@ -368,7 +367,7 @@ public static final class Tombstone implements ToXContentObject, Writeable { TOMBSTONE_PARSER.declareString((b, s) -> {}, new ParseField(DELETE_DATE_KEY)); } - static final DateFormatter FORMATTER = DateFormatters.forPattern("strict_date_optional_time").withZone(ZoneOffset.UTC); + static final DateFormatter FORMATTER = DateFormatter.forPattern("strict_date_optional_time").withZone(ZoneOffset.UTC); static ContextParser getParser() { return (parser, context) -> TOMBSTONE_PARSER.apply(parser, null).build(); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index f60866383107a..050d97ba54cf0 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -28,8 +28,8 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.time.DateFormatter; -import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.time.DateMathParser; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexNotFoundException; @@ -819,7 +819,7 @@ private static List resolveEmptyOrTrivialWildcard(IndicesOptions options static final class DateMathExpressionResolver implements ExpressionResolver { - private static final DateFormatter DEFAULT_DATE_FORMATTER = DateFormatters.forPattern("uuuu.MM.dd"); + private static final DateFormatter DEFAULT_DATE_FORMATTER = DateFormatter.forPattern("uuuu.MM.dd"); private static final String EXPRESSION_LEFT_BOUND = "<"; private static final String EXPRESSION_RIGHT_BOUND = ">"; private static final char LEFT_BOUND = '{'; @@ -912,18 +912,19 @@ String resolveExpression(String expression, final Context context) { int formatPatternTimeZoneSeparatorIndex = patternAndTZid.indexOf(TIME_ZONE_BOUND); if (formatPatternTimeZoneSeparatorIndex != -1) { dateFormatterPattern = patternAndTZid.substring(0, formatPatternTimeZoneSeparatorIndex); - timeZone = ZoneId.of(patternAndTZid.substring(formatPatternTimeZoneSeparatorIndex + 1)); + timeZone = DateUtils.of(patternAndTZid.substring(formatPatternTimeZoneSeparatorIndex + 1)); } else { dateFormatterPattern = patternAndTZid; timeZone = ZoneOffset.UTC; } - dateFormatter = 
DateFormatters.forPattern(dateFormatterPattern); + dateFormatter = DateFormatter.forPattern(dateFormatterPattern); } + DateFormatter formatter = dateFormatter.withZone(timeZone); DateMathParser dateMathParser = formatter.toDateMathParser(); - long millis = dateMathParser.parse(mathExpression, context::getStartTime, false, timeZone); + Instant instant = dateMathParser.parse(mathExpression, context::getStartTime, false, timeZone); - String time = formatter.format(Instant.ofEpochMilli(millis)); + String time = formatter.format(instant); beforePlaceHolderSb.append(time); inPlaceHolderSb = new StringBuilder(); inPlaceHolder = false; diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java b/server/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java index 21885d1788c7e..f8afbeb449361 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java @@ -32,7 +32,6 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; -import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -48,7 +47,7 @@ */ public final class UnassignedInfo implements ToXContentFragment, Writeable { - public static final DateFormatter DATE_TIME_FORMATTER = DateFormatters.forPattern("dateOptionalTime").withZone(ZoneOffset.UTC); + public static final DateFormatter DATE_TIME_FORMATTER = DateFormatter.forPattern("dateOptionalTime").withZone(ZoneOffset.UTC); public static final Setting INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING = Setting.positiveTimeSetting("index.unassigned.node_left.delayed_timeout", TimeValue.timeValueMinutes(1), Property.Dynamic, diff --git a/server/src/main/java/org/elasticsearch/common/Rounding.java b/server/src/main/java/org/elasticsearch/common/Rounding.java index 593964f61e93f..dab29c88634e9 100644 --- a/server/src/main/java/org/elasticsearch/common/Rounding.java +++ b/server/src/main/java/org/elasticsearch/common/Rounding.java @@ -19,9 +19,11 @@ package org.elasticsearch.common; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.common.unit.TimeValue; import java.io.IOException; @@ -188,7 +190,7 @@ static class TimeUnitRounding extends Rounding { TimeUnitRounding(StreamInput in) throws IOException { unit = DateTimeUnit.resolve(in.readByte()); - timeZone = ZoneId.of(in.readString()); + timeZone = DateUtils.of(in.readString()); unitRoundsToMidnight = unit.getField().getBaseUnit().getDuration().toMillis() > 60L * 60L * 1000L; } @@ -367,8 +369,11 @@ public long nextRoundingValue(long utcMillis) { @Override public void innerWriteTo(StreamOutput out) throws IOException { out.writeByte(unit.getId()); - String tz = ZoneOffset.UTC.equals(timeZone) ? 
"UTC" : timeZone.getId(); // stay joda compatible - out.writeString(tz); + if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + out.writeString(timeZone.getId()); + } else { + out.writeString(DateUtils.zoneIdToDateTimeZone(timeZone).getID()); + } } @Override @@ -417,7 +422,7 @@ public String toString() { TimeIntervalRounding(StreamInput in) throws IOException { interval = in.readVLong(); - timeZone = ZoneId.of(in.readString()); + timeZone = DateUtils.of(in.readString()); } @Override @@ -490,8 +495,11 @@ public long nextRoundingValue(long time) { @Override public void innerWriteTo(StreamOutput out) throws IOException { out.writeVLong(interval); - String tz = ZoneOffset.UTC.equals(timeZone) ? "UTC" : timeZone.getId(); // stay joda compatible - out.writeString(tz); + if (out.getVersion().onOrAfter(Version.V_7_0_0)) { + out.writeString(timeZone.getId()); + } else { + out.writeString(DateUtils.zoneIdToDateTimeZone(timeZone).getID()); + } } @Override diff --git a/server/src/main/java/org/elasticsearch/common/Table.java b/server/src/main/java/org/elasticsearch/common/Table.java index a41fd267329ff..d097783a838f3 100644 --- a/server/src/main/java/org/elasticsearch/common/Table.java +++ b/server/src/main/java/org/elasticsearch/common/Table.java @@ -20,7 +20,6 @@ package org.elasticsearch.common; import org.elasticsearch.common.time.DateFormatter; -import org.elasticsearch.common.time.DateFormatters; import java.time.Instant; import java.time.ZoneOffset; @@ -85,7 +84,7 @@ public Table endHeaders() { return this; } - private static final DateFormatter FORMATTER = DateFormatters.forPattern("HH:mm:ss").withZone(ZoneOffset.UTC); + private static final DateFormatter FORMATTER = DateFormatter.forPattern("HH:mm:ss").withZone(ZoneOffset.UTC); public Table startRow() { if (headers.isEmpty()) { diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index fd9ffdfd31d16..7759e13e536b7 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -653,6 +653,23 @@ public DateTimeZone readOptionalTimeZone() throws IOException { return null; } + /** + * Read a {@linkplain DateTimeZone}. + */ + public ZoneId readZoneId() throws IOException { + return ZoneId.of(readString()); + } + + /** + * Read an optional {@linkplain ZoneId}. 
+ */ + public ZoneId readOptionalZoneId() throws IOException { + if (readBoolean()) { + return ZoneId.of(readString()); + } + return null; + } + public int[] readIntArray() throws IOException { int length = readArraySize(); int[] values = new int[length]; diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index 8131335602693..699713cb0f836 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -55,6 +55,7 @@ import java.nio.file.FileSystemLoopException; import java.nio.file.NoSuchFileException; import java.nio.file.NotDirectoryException; +import java.time.ZoneId; import java.time.ZonedDateTime; import java.util.Collection; import java.util.Collections; @@ -677,7 +678,6 @@ public final void writeMap(final Map map, final Writer keyWriter writers.put(ZonedDateTime.class, (o, v) -> { o.writeByte((byte) 23); final ZonedDateTime zonedDateTime = (ZonedDateTime) v; - zonedDateTime.getZone().getId(); o.writeString(zonedDateTime.getZone().getId()); o.writeLong(zonedDateTime.toInstant().toEpochMilli()); }); @@ -988,6 +988,13 @@ public void writeTimeZone(DateTimeZone timeZone) throws IOException { writeString(timeZone.getID()); } + /** + * Write a {@linkplain ZoneId} to the stream. + */ + public void writeZoneId(ZoneId timeZone) throws IOException { + writeString(timeZone.getId()); + } + /** * Write an optional {@linkplain DateTimeZone} to the stream. */ @@ -1000,6 +1007,18 @@ public void writeOptionalTimeZone(@Nullable DateTimeZone timeZone) throws IOExce } } + /** + * Write an optional {@linkplain ZoneId} to the stream. + */ + public void writeOptionalZoneId(@Nullable ZoneId timeZone) throws IOException { + if (timeZone == null) { + writeBoolean(false); + } else { + writeBoolean(true); + writeZoneId(timeZone); + } + } + /** * Writes a list of {@link Streamable} objects */ diff --git a/server/src/main/java/org/elasticsearch/common/joda/JodaDateFormatter.java b/server/src/main/java/org/elasticsearch/common/joda/JodaDateFormatter.java index 5db95b12bb437..706e995530962 100644 --- a/server/src/main/java/org/elasticsearch/common/joda/JodaDateFormatter.java +++ b/server/src/main/java/org/elasticsearch/common/joda/JodaDateFormatter.java @@ -31,12 +31,12 @@ import java.time.ZonedDateTime; import java.time.temporal.TemporalAccessor; import java.util.Locale; +import java.util.Objects; public class JodaDateFormatter implements DateFormatter { - final String pattern; + final String pattern; final DateTimeFormatter parser; - final DateTimeFormatter printer; public JodaDateFormatter(String pattern, DateTimeFormatter parser, DateTimeFormatter printer) { @@ -108,4 +108,21 @@ public ZoneId zone() { public DateMathParser toDateMathParser() { return new JodaDateMathParser(this); } + + @Override + public int hashCode() { + return Objects.hash(locale(), zone(), pattern()); + } + + @Override + public boolean equals(Object obj) { + if (obj.getClass().equals(this.getClass()) == false) { + return false; + } + JodaDateFormatter other = (JodaDateFormatter) obj; + + return Objects.equals(pattern(), other.pattern()) && + Objects.equals(locale(), other.locale()) && + Objects.equals(zone(), other.zone()); + } } diff --git a/server/src/main/java/org/elasticsearch/common/joda/JodaDateMathParser.java b/server/src/main/java/org/elasticsearch/common/joda/JodaDateMathParser.java index b86af7a75a55f..b7522c6a3233e 
100644 --- a/server/src/main/java/org/elasticsearch/common/joda/JodaDateMathParser.java +++ b/server/src/main/java/org/elasticsearch/common/joda/JodaDateMathParser.java @@ -26,6 +26,7 @@ import org.joda.time.MutableDateTime; import org.joda.time.format.DateTimeFormatter; +import java.time.Instant; import java.time.ZoneId; import java.util.Objects; import java.util.function.LongSupplier; @@ -50,7 +51,7 @@ public JodaDateMathParser(JodaDateFormatter dateTimeFormatter) { // if it has been used. For instance, the request cache does not cache requests that make // use of `now`. @Override - public long parse(String text, LongSupplier now, boolean roundUp, ZoneId tz) { + public Instant parse(String text, LongSupplier now, boolean roundUp, ZoneId tz) { final DateTimeZone timeZone = tz == null ? null : DateUtils.zoneIdToDateTimeZone(tz); long time; String mathString; @@ -64,13 +65,13 @@ public long parse(String text, LongSupplier now, boolean roundUp, ZoneId tz) { } else { int index = text.indexOf("||"); if (index == -1) { - return parseDateTime(text, timeZone, roundUp); + return Instant.ofEpochMilli(parseDateTime(text, timeZone, roundUp)); } time = parseDateTime(text.substring(0, index), timeZone, false); mathString = text.substring(index + 2); } - return parseMath(mathString, time, roundUp, timeZone); + return Instant.ofEpochMilli(parseMath(mathString, time, roundUp, timeZone)); } private long parseMath(String mathString, long time, boolean roundUp, DateTimeZone timeZone) throws ElasticsearchParseException { diff --git a/server/src/main/java/org/elasticsearch/common/time/DateFormatter.java b/server/src/main/java/org/elasticsearch/common/time/DateFormatter.java index 8d83aa30b3587..aeea14ee1f011 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateFormatter.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateFormatter.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.time; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.joda.Joda; import org.joda.time.DateTime; import java.time.Instant; @@ -87,7 +86,8 @@ default DateTime parseJoda(String input) { * Return the given millis-since-epoch formatted with this format. */ default String formatMillis(long millis) { - return format(ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), ZoneOffset.UTC)); + ZoneId zone = zone() != null ? zone() : ZoneOffset.UTC; + return format(Instant.ofEpochMilli(millis).atZone(zone)); } /** @@ -121,7 +121,9 @@ default String formatJoda(DateTime dateTime) { ZoneId zone(); /** - * Return a {@link DateMathParser} built from this formatter. 
+ * Create a DateMathParser from the existing formatter + * + * @return The DateMathParser object */ DateMathParser toDateMathParser(); @@ -129,12 +131,11 @@ static DateFormatter forPattern(String input) { if (Strings.hasLength(input) == false) { throw new IllegalArgumentException("No date pattern provided"); } - if (input.startsWith("8") == false) { - return Joda.forPattern(input); - } - // dates starting with 8 will not be using joda but java time formatters - input = input.substring(1); + // support the 6.x BWC compatible way of parsing java 8 dates + if (input.startsWith("8")) { + input = input.substring(1); + } List formatters = new ArrayList<>(); for (String pattern : Strings.delimitedListToStringArray(input, "||")) { diff --git a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java index de75356a58995..2e3c2953ec375 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java @@ -76,28 +76,53 @@ public class DateFormatters { private static final DateTimeFormatter STRICT_DATE_OPTIONAL_TIME_PRINTER = new DateTimeFormatterBuilder() .append(STRICT_YEAR_MONTH_DAY_FORMATTER) .appendLiteral('T') - .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) - .appendFraction(NANO_OF_SECOND, 3, 9, true) + .optionalStart() + .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) + .optionalStart() + .appendLiteral(':') + .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) + .optionalStart() + .appendLiteral(':') + .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) + .optionalStart() + .appendFraction(NANO_OF_SECOND, 3, 3, true) + .optionalEnd() + .optionalEnd() .optionalStart() .appendZoneOrOffsetId() .optionalEnd() + .optionalEnd() + .optionalEnd() .toFormatter(Locale.ROOT); private static final DateTimeFormatter STRICT_DATE_OPTIONAL_TIME_FORMATTER = new DateTimeFormatterBuilder() .append(STRICT_YEAR_MONTH_DAY_FORMATTER) .optionalStart() .appendLiteral('T') - .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) + .optionalStart() + .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) + .optionalStart() + .appendLiteral(':') + .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) + .optionalStart() + .appendLiteral(':') + .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) .optionalStart() .appendFraction(NANO_OF_SECOND, 3, 3, true) .optionalEnd() .optionalStart() + .appendFraction(NANO_OF_SECOND, 3, 9, true) + .optionalEnd() + .optionalEnd() + .optionalStart() .appendZoneOrOffsetId() .optionalEnd() .optionalStart() .append(TIME_ZONE_FORMATTER_NO_COLON) .optionalEnd() .optionalEnd() + .optionalEnd() + .optionalEnd() .toFormatter(Locale.ROOT); /** @@ -123,11 +148,33 @@ public class DateFormatters { .optionalEnd() .toFormatter(Locale.ROOT); + private static final DateTimeFormatter STRICT_DATE_OPTIONAL_TIME_PRINTER_NANOS = new DateTimeFormatterBuilder() + .append(STRICT_YEAR_MONTH_DAY_FORMATTER) + .appendLiteral('T') + .optionalStart() + .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) + .optionalStart() + .appendLiteral(':') + .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) + .optionalStart() + .appendLiteral(':') + .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) + .optionalStart() + .appendFraction(NANO_OF_SECOND, 3, 9, true) + .optionalEnd() + .optionalEnd() + .optionalStart() + .appendZoneOrOffsetId() + .optionalEnd() + .optionalEnd() + .optionalEnd() + 
.toFormatter(Locale.ROOT); + /** * Returns a generic ISO datetime parser where the date is mandatory and the time is optional with nanosecond resolution. */ private static final DateFormatter STRICT_DATE_OPTIONAL_TIME_NANOS = new JavaDateFormatter("strict_date_optional_time_nanos", - STRICT_DATE_OPTIONAL_TIME_PRINTER, STRICT_DATE_OPTIONAL_TIME_FORMATTER_WITH_NANOS); + STRICT_DATE_OPTIONAL_TIME_PRINTER_NANOS, STRICT_DATE_OPTIONAL_TIME_FORMATTER_WITH_NANOS); ///////////////////////////////////////// // @@ -329,31 +376,32 @@ public class DateFormatters { * Returns a basic formatter that combines a basic weekyear date and time * without millis, separated by a 'T' (xxxx'W'wwe'T'HHmmssX). */ - private static final DateFormatter STRICT_BASIC_WEEK_DATE_TIME_NO_MILLIS = new JavaDateFormatter("strict_basic_week_date_no_millis", - new DateTimeFormatterBuilder() - .append(STRICT_BASIC_WEEK_DATE_PRINTER) - .appendLiteral("T") - .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) - .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) - .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .appendZoneOrOffsetId() - .toFormatter(Locale.ROOT), - new DateTimeFormatterBuilder() - .append(STRICT_BASIC_WEEK_DATE_PRINTER) - .appendLiteral("T") - .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) - .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) - .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .appendZoneOrOffsetId() - .toFormatter(Locale.ROOT), - new DateTimeFormatterBuilder() - .append(STRICT_BASIC_WEEK_DATE_PRINTER) - .appendLiteral("T") - .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) - .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) - .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) - .append(TIME_ZONE_FORMATTER_NO_COLON) - .toFormatter(Locale.ROOT) + private static final DateFormatter STRICT_BASIC_WEEK_DATE_TIME_NO_MILLIS = + new JavaDateFormatter("strict_basic_week_date_time_no_millis", + new DateTimeFormatterBuilder() + .append(STRICT_BASIC_WEEK_DATE_PRINTER) + .appendLiteral("T") + .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) + .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) + .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) + .appendZoneOrOffsetId() + .toFormatter(Locale.ROOT), + new DateTimeFormatterBuilder() + .append(STRICT_BASIC_WEEK_DATE_PRINTER) + .appendLiteral("T") + .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) + .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) + .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) + .appendZoneOrOffsetId() + .toFormatter(Locale.ROOT), + new DateTimeFormatterBuilder() + .append(STRICT_BASIC_WEEK_DATE_PRINTER) + .appendLiteral("T") + .appendValue(HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE) + .appendValue(MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE) + .appendValue(SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE) + .append(TIME_ZONE_FORMATTER_NO_COLON) + .toFormatter(Locale.ROOT) ); /* @@ -389,7 +437,7 @@ public class DateFormatters { * An ISO date formatter that formats or parses a date without an offset, such as '2011-12-03'. */ private static final DateFormatter STRICT_DATE = new JavaDateFormatter("strict_date", - DateTimeFormatter.ISO_LOCAL_DATE.withResolverStyle(ResolverStyle.LENIENT)); + DateTimeFormatter.ISO_LOCAL_DATE.withResolverStyle(ResolverStyle.LENIENT).withLocale(Locale.ROOT)); /* * A date formatter that formats or parses a date plus an hour without an offset, such as '2011-12-03T01'. 
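[editor's note] The hunks above rewrite STRICT_DATE_OPTIONAL_TIME_FORMATTER and its nanos printer with nested optionalStart()/optionalEnd() sections instead of appending the all-or-nothing STRICT_HOUR_MINUTE_SECOND_FORMATTER, so hour, minute, second, fraction and zone each degrade gracefully when absent. Below is a minimal, self-contained sketch of that builder technique; it is illustrative only, not code from the patch, and the field widths and class name are assumptions.

import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeFormatterBuilder;
import java.time.format.SignStyle;
import java.time.temporal.ChronoField;
import java.util.Locale;

public class OptionalTimeSketch {
    public static void main(String[] args) {
        DateTimeFormatter formatter = new DateTimeFormatterBuilder()
            // date part is mandatory (widths are assumptions, not copied from the patch)
            .appendValue(ChronoField.YEAR, 4, 10, SignStyle.EXCEEDS_PAD)
            .appendLiteral('-')
            .appendValue(ChronoField.MONTH_OF_YEAR, 2, 2, SignStyle.NOT_NEGATIVE)
            .appendLiteral('-')
            .appendValue(ChronoField.DAY_OF_MONTH, 2, 2, SignStyle.NOT_NEGATIVE)
            .optionalStart()                 // everything from 'T' on may be absent
            .appendLiteral('T')
            .appendValue(ChronoField.HOUR_OF_DAY, 2, 2, SignStyle.NOT_NEGATIVE)
            .optionalStart()                 // minute only if present
            .appendLiteral(':')
            .appendValue(ChronoField.MINUTE_OF_HOUR, 2, 2, SignStyle.NOT_NEGATIVE)
            .optionalStart()                 // second only if present
            .appendLiteral(':')
            .appendValue(ChronoField.SECOND_OF_MINUTE, 2, 2, SignStyle.NOT_NEGATIVE)
            .optionalStart()                 // fractional seconds only if present
            .appendFraction(ChronoField.NANO_OF_SECOND, 3, 9, true)
            .optionalEnd()
            .optionalEnd()
            .optionalEnd()
            .optionalStart()                 // zone/offset only if present
            .appendZoneOrOffsetId()
            .optionalEnd()
            .optionalEnd()
            .toFormatter(Locale.ROOT);

        // the single formatter accepts every one of these shapes:
        System.out.println(formatter.parse("2019-01-22"));
        System.out.println(formatter.parse("2019-01-22T10"));
        System.out.println(formatter.parse("2019-01-22T10:15:30"));
        System.out.println(formatter.parse("2019-01-22T10:15:30.123Z"));
    }
}

This is why the rewritten formatter can parse a bare date, a date with only an hour, or a full timestamp with fraction and zone, where the old composition required the complete hour-minute-second block or nothing.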
@@ -514,7 +562,9 @@ public class DateFormatters { new JavaDateFormatter("strict_hour_minute_second_millis", STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER, STRICT_HOUR_MINUTE_SECOND_MILLIS_FORMATTER); - private static final DateFormatter STRICT_HOUR_MINUTE_SECOND_FRACTION = STRICT_HOUR_MINUTE_SECOND_MILLIS; + private static final DateFormatter STRICT_HOUR_MINUTE_SECOND_FRACTION = + new JavaDateFormatter("strict_hour_minute_second_fraction", + STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER, STRICT_HOUR_MINUTE_SECOND_MILLIS_FORMATTER); /* * Returns a formatter that combines a full date, two digit hour of day, @@ -537,7 +587,21 @@ public class DateFormatters { .toFormatter(Locale.ROOT) ); - private static final DateFormatter STRICT_DATE_HOUR_MINUTE_SECOND_MILLIS = STRICT_DATE_HOUR_MINUTE_SECOND_FRACTION; + private static final DateFormatter STRICT_DATE_HOUR_MINUTE_SECOND_MILLIS = new JavaDateFormatter( + "strict_date_hour_minute_second_millis", + new DateTimeFormatterBuilder() + .append(STRICT_YEAR_MONTH_DAY_FORMATTER) + .appendLiteral("T") + .append(STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER) + .toFormatter(Locale.ROOT), + new DateTimeFormatterBuilder() + .append(STRICT_YEAR_MONTH_DAY_FORMATTER) + .appendLiteral("T") + .append(STRICT_HOUR_MINUTE_SECOND_FORMATTER) + // this one here is lenient as well to retain joda time based bwc compatibility + .appendFraction(NANO_OF_SECOND, 1, 3, true) + .toFormatter(Locale.ROOT) + ); /* * Returns a formatter for a two digit hour of day. (HH) @@ -782,14 +846,12 @@ public class DateFormatters { private static final DateTimeFormatter DATE_FORMATTER = new DateTimeFormatterBuilder() .appendValue(ChronoField.YEAR, 1, 5, SignStyle.NORMAL) - .optionalStart() .appendLiteral('-') .appendValue(MONTH_OF_YEAR, 1, 2, SignStyle.NOT_NEGATIVE) .optionalStart() .appendLiteral('-') .appendValue(DAY_OF_MONTH, 1, 2, SignStyle.NOT_NEGATIVE) .optionalEnd() - .optionalEnd() .toFormatter(Locale.ROOT); private static final DateTimeFormatter HOUR_MINUTE_FORMATTER = new DateTimeFormatterBuilder() @@ -928,7 +990,17 @@ public class DateFormatters { .append(HOUR_MINUTE_SECOND_MILLIS_FORMATTER) .toFormatter(Locale.ROOT)); - private static final DateFormatter DATE_HOUR_MINUTE_SECOND_FRACTION = DATE_HOUR_MINUTE_SECOND_MILLIS; + private static final DateFormatter DATE_HOUR_MINUTE_SECOND_FRACTION = new JavaDateFormatter("date_hour_minute_second_fraction", + new DateTimeFormatterBuilder() + .append(STRICT_YEAR_MONTH_DAY_FORMATTER) + .appendLiteral("T") + .append(STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER) + .toFormatter(Locale.ROOT), + new DateTimeFormatterBuilder() + .append(DATE_FORMATTER) + .appendLiteral("T") + .append(HOUR_MINUTE_SECOND_MILLIS_FORMATTER) + .toFormatter(Locale.ROOT)); /* * Returns a formatter that combines a full date, two digit hour of day, @@ -1033,6 +1105,9 @@ public class DateFormatters { private static final DateFormatter HOUR_MINUTE_SECOND_MILLIS = new JavaDateFormatter("hour_minute_second_millis", STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER, HOUR_MINUTE_SECOND_MILLIS_FORMATTER); + private static final DateFormatter HOUR_MINUTE_SECOND_FRACTION = new JavaDateFormatter("hour_minute_second_fraction", + STRICT_HOUR_MINUTE_SECOND_MILLIS_PRINTER, HOUR_MINUTE_SECOND_MILLIS_FORMATTER); + /* * Returns a formatter for a two digit hour of day and two digit minute of * hour. 
(HH:mm) @@ -1272,7 +1347,7 @@ public class DateFormatters { // ///////////////////////////////////////// - public static DateFormatter forPattern(String input) { + static DateFormatter forPattern(String input) { if (Strings.hasLength(input)) { input = input.trim(); } @@ -1331,7 +1406,7 @@ public static DateFormatter forPattern(String input) { } else if ("hourMinuteSecond".equals(input) || "hour_minute_second".equals(input)) { return HOUR_MINUTE_SECOND; } else if ("hourMinuteSecondFraction".equals(input) || "hour_minute_second_fraction".equals(input)) { - return HOUR_MINUTE_SECOND_MILLIS; + return HOUR_MINUTE_SECOND_FRACTION; } else if ("hourMinuteSecondMillis".equals(input) || "hour_minute_second_millis".equals(input)) { return HOUR_MINUTE_SECOND_MILLIS; } else if ("ordinalDate".equals(input) || "ordinal_date".equals(input)) { diff --git a/server/src/main/java/org/elasticsearch/common/time/DateMathParser.java b/server/src/main/java/org/elasticsearch/common/time/DateMathParser.java index 1e997cce23be8..3ba392822ca0c 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateMathParser.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateMathParser.java @@ -21,6 +21,7 @@ import org.joda.time.DateTimeZone; +import java.time.Instant; import java.time.ZoneId; import java.util.function.LongSupplier; @@ -32,7 +33,7 @@ public interface DateMathParser { /** * Parse a date math expression without timzeone info and rounding down. */ - default long parse(String text, LongSupplier now) { + default Instant parse(String text, LongSupplier now) { return parse(text, now, false, (ZoneId) null); } @@ -42,7 +43,7 @@ default long parse(String text, LongSupplier now) { // exists for backcompat, do not use! @Deprecated - default long parse(String text, LongSupplier now, boolean roundUp, DateTimeZone tz) { + default Instant parse(String text, LongSupplier now, boolean roundUp, DateTimeZone tz) { return parse(text, now, roundUp, tz == null ? null : ZoneId.of(tz.getID())); } @@ -68,7 +69,7 @@ default long parse(String text, LongSupplier now, boolean roundUp, DateTimeZone * @param now a supplier to retrieve the current date in milliseconds, if needed for additions * @param roundUp should the result be rounded up * @param tz an optional timezone that should be applied before returning the milliseconds since the epoch - * @return the parsed date in milliseconds since the epoch + * @return the parsed date as an Instant since the epoch */ - long parse(String text, LongSupplier now, boolean roundUp, ZoneId tz); + Instant parse(String text, LongSupplier now, boolean roundUp, ZoneId tz); } diff --git a/server/src/main/java/org/elasticsearch/common/time/DateUtils.java b/server/src/main/java/org/elasticsearch/common/time/DateUtils.java index c46cee881a1a0..e913a69dca776 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateUtils.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateUtils.java @@ -65,12 +65,16 @@ public static ZoneId dateTimeZoneToZoneId(DateTimeZone timeZone) { return ZoneOffset.UTC; } - String deprecatedId = DEPRECATED_SHORT_TIMEZONES.get(timeZone.getID()); + return of(timeZone.getID()); + } + + public static ZoneId of(String zoneId) { + String deprecatedId = DEPRECATED_SHORT_TIMEZONES.get(zoneId); if (deprecatedId != null) { deprecationLogger.deprecatedAndMaybeLog("timezone", - "Use of short timezone id " + timeZone.getID() + " is deprecated. Use " + deprecatedId + " instead"); + "Use of short timezone id " + zoneId + " is deprecated. 
Use " + deprecatedId + " instead"); return ZoneId.of(deprecatedId); } - return ZoneId.of(timeZone.getID()); + return ZoneId.of(zoneId).normalized(); } } diff --git a/server/src/main/java/org/elasticsearch/common/time/EpochTime.java b/server/src/main/java/org/elasticsearch/common/time/EpochTime.java index 7e0f17c5f6d9c..c824a7c7e7c35 100644 --- a/server/src/main/java/org/elasticsearch/common/time/EpochTime.java +++ b/server/src/main/java/org/elasticsearch/common/time/EpochTime.java @@ -19,6 +19,8 @@ package org.elasticsearch.common.time; +import org.elasticsearch.bootstrap.JavaVersion; + import java.time.format.DateTimeFormatter; import java.time.format.DateTimeFormatterBuilder; import java.time.format.ResolverStyle; @@ -99,6 +101,10 @@ public TemporalAccessor resolve(Map fieldValues, } fieldValues.put(ChronoField.INSTANT_SECONDS, seconds); fieldValues.put(ChronoField.NANO_OF_SECOND, nanos); + // if there is already a milli of second, we need to overwrite it + if (fieldValues.containsKey(ChronoField.MILLI_OF_SECOND)) { + fieldValues.put(ChronoField.MILLI_OF_SECOND, nanos / 1_000_000); + } return null; } }; @@ -106,7 +112,8 @@ public TemporalAccessor resolve(Map fieldValues, private static final EpochField NANOS_OF_MILLI = new EpochField(ChronoUnit.NANOS, ChronoUnit.MILLIS, ValueRange.of(0, 999_999)) { @Override public boolean isSupportedBy(TemporalAccessor temporal) { - return temporal.isSupported(ChronoField.NANO_OF_SECOND) && temporal.getLong(ChronoField.NANO_OF_SECOND) % 1_000_000 != 0; + return temporal.isSupported(ChronoField.INSTANT_SECONDS) && temporal.isSupported(ChronoField.NANO_OF_SECOND) + && temporal.getLong(ChronoField.NANO_OF_SECOND) % 1_000_000 != 0; } @Override public long getFrom(TemporalAccessor temporal) { @@ -156,9 +163,20 @@ public long getFrom(TemporalAccessor temporal) { builder -> builder.parseDefaulting(ChronoField.NANO_OF_SECOND, 999_999_999L), SECONDS_FORMATTER1, SECONDS_FORMATTER2, SECONDS_FORMATTER3); - static final DateFormatter MILLIS_FORMATTER = new JavaDateFormatter("epoch_millis", MILLISECONDS_FORMATTER3, - builder -> builder.parseDefaulting(EpochTime.NANOS_OF_MILLI, 999_999L), - MILLISECONDS_FORMATTER1, MILLISECONDS_FORMATTER2, MILLISECONDS_FORMATTER3); + static final DateFormatter MILLIS_FORMATTER = getEpochMillisFormatter(); + + private static DateFormatter getEpochMillisFormatter() { + // the third formatter fails under java 8 as a printer, so fall back to this one + final DateTimeFormatter printer; + if (JavaVersion.current().getVersion().get(0) == 8) { + printer = MILLISECONDS_FORMATTER1; + } else { + printer = MILLISECONDS_FORMATTER3; + } + return new JavaDateFormatter("epoch_millis", printer, + builder -> builder.parseDefaulting(EpochTime.NANOS_OF_MILLI, 999_999L), + MILLISECONDS_FORMATTER1, MILLISECONDS_FORMATTER2, MILLISECONDS_FORMATTER3); + } private abstract static class EpochField implements TemporalField { diff --git a/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java b/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java index 20ef593a32610..bcdf9cbdcf674 100644 --- a/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java +++ b/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java @@ -24,6 +24,7 @@ import java.time.ZoneId; import java.time.format.DateTimeFormatter; import java.time.format.DateTimeFormatterBuilder; +import java.time.format.DateTimeParseException; import java.time.temporal.ChronoField; import java.time.temporal.TemporalAccessor; import 
java.time.temporal.TemporalField; @@ -76,6 +77,8 @@ private JavaDateFormatter(String format, DateTimeFormatter printer, DateTimeForm if (distinctLocales > 1) { throw new IllegalArgumentException("formatters must have the same locale"); } + this.printer = printer; + this.format = format; if (parsers.length == 0) { this.parser = printer; } else if (parsers.length == 1) { @@ -87,11 +90,11 @@ private JavaDateFormatter(String format, DateTimeFormatter printer, DateTimeForm } this.parser = builder.toFormatter(Locale.ROOT); } - this.format = format; - this.printer = printer; DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder(); - builder.append(this.parser); + if (format.contains("||") == false) { + builder.append(this.parser); + } roundupParserConsumer.accept(builder); DateTimeFormatter roundupFormatter = builder.toFormatter(parser.getLocale()); if (printer.getZone() != null) { @@ -117,7 +120,12 @@ public TemporalAccessor parse(String input) { if (Strings.isNullOrEmpty(input)) { throw new IllegalArgumentException("cannot parse empty date"); } - return parser.parse(input); + + try { + return parser.parse(input); + } catch (DateTimeParseException e) { + throw new IllegalArgumentException("failed to parse date field [" + input + "] with format [" + format + "]", e); + } } @Override @@ -162,7 +170,7 @@ public ZoneId zone() { @Override public DateMathParser toDateMathParser() { - return new JavaDateMathParser(parser, roundupParser); + return new JavaDateMathParser(format, parser, roundupParser); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/time/JavaDateMathParser.java b/server/src/main/java/org/elasticsearch/common/time/JavaDateMathParser.java index 3c9a1615a6c01..9ee390ba391a7 100644 --- a/server/src/main/java/org/elasticsearch/common/time/JavaDateMathParser.java +++ b/server/src/main/java/org/elasticsearch/common/time/JavaDateMathParser.java @@ -22,7 +22,6 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Strings; -import java.time.DateTimeException; import java.time.DayOfWeek; import java.time.Instant; import java.time.LocalTime; @@ -30,6 +29,7 @@ import java.time.ZoneOffset; import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; +import java.time.format.DateTimeParseException; import java.time.temporal.ChronoField; import java.time.temporal.TemporalAccessor; import java.time.temporal.TemporalAdjusters; @@ -48,20 +48,23 @@ public class JavaDateMathParser implements DateMathParser { private final DateTimeFormatter formatter; private final DateTimeFormatter roundUpFormatter; + private final String format; - public JavaDateMathParser(DateTimeFormatter formatter, DateTimeFormatter roundUpFormatter) { + JavaDateMathParser(String format, DateTimeFormatter formatter, DateTimeFormatter roundUpFormatter) { + this.format = format; Objects.requireNonNull(formatter); this.formatter = formatter; this.roundUpFormatter = roundUpFormatter; } @Override - public long parse(String text, LongSupplier now, boolean roundUp, ZoneId timeZone) { - long time; + public Instant parse(String text, LongSupplier now, boolean roundUp, ZoneId timeZone) { + Instant time; String mathString; if (text.startsWith("now")) { try { - time = now.getAsLong(); + // TODO only millisecond granularity here! 
+ time = Instant.ofEpochMilli(now.getAsLong()); } catch (Exception e) { throw new ElasticsearchParseException("could not read the current timestamp", e); } @@ -78,12 +81,12 @@ public long parse(String text, LongSupplier now, boolean roundUp, ZoneId timeZon return parseMath(mathString, time, roundUp, timeZone); } - private long parseMath(final String mathString, final long time, final boolean roundUp, + private Instant parseMath(final String mathString, final Instant time, final boolean roundUp, ZoneId timeZone) throws ElasticsearchParseException { if (timeZone == null) { timeZone = ZoneOffset.UTC; } - ZonedDateTime dateTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(time), timeZone); + ZonedDateTime dateTime = ZonedDateTime.ofInstant(time, timeZone); for (int i = 0; i < mathString.length(); ) { char c = mathString.charAt(i++); final boolean round; @@ -204,18 +207,18 @@ private long parseMath(final String mathString, final long time, final boolean r dateTime = dateTime.minus(1, ChronoField.MILLI_OF_SECOND.getBaseUnit()); } } - return dateTime.toInstant().toEpochMilli(); + return dateTime.toInstant(); } - private long parseDateTime(String value, ZoneId timeZone, boolean roundUpIfNoTime) { + private Instant parseDateTime(String value, ZoneId timeZone, boolean roundUpIfNoTime) { if (Strings.isNullOrEmpty(value)) { - throw new IllegalArgumentException("cannot parse empty date"); + throw new ElasticsearchParseException("cannot parse empty date"); } DateTimeFormatter formatter = roundUpIfNoTime ? this.roundUpFormatter : this.formatter; try { if (timeZone == null) { - return DateFormatters.toZonedDateTime(formatter.parse(value)).toInstant().toEpochMilli(); + return DateFormatters.toZonedDateTime(formatter.parse(value)).toInstant(); } else { TemporalAccessor accessor = formatter.parse(value); ZoneId zoneId = TemporalQueries.zone().queryFrom(accessor); @@ -223,10 +226,11 @@ private long parseDateTime(String value, ZoneId timeZone, boolean roundUpIfNoTim timeZone = zoneId; } - return DateFormatters.toZonedDateTime(accessor).withZoneSameLocal(timeZone).toInstant().toEpochMilli(); + return DateFormatters.toZonedDateTime(accessor).withZoneSameLocal(timeZone).toInstant(); } - } catch (IllegalArgumentException | DateTimeException e) { - throw new ElasticsearchParseException("failed to parse date field [{}]: [{}]", e, value, e.getMessage()); + } catch (DateTimeParseException e) { + throw new ElasticsearchParseException("failed to parse date field [{}] with format [{}]: [{}]", + e, value, format, e.getMessage()); } } } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java index f32ba715a8068..3f731b73dc870 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java @@ -22,7 +22,6 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.time.DateFormatter; -import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.script.JodaCompatibleZonedDateTime; @@ -65,9 +64,9 @@ public class XContentElasticsearchExtension implements XContentBuilderExtension { public static final DateTimeFormatter DEFAULT_DATE_PRINTER = 
ISODateTimeFormat.dateTime().withZone(DateTimeZone.UTC); - public static final DateFormatter DEFAULT_FORMATTER = DateFormatters.forPattern("strict_date_optional_time_nanos"); - public static final DateFormatter LOCAL_TIME_FORMATTER = DateFormatters.forPattern("HH:mm:ss.SSS"); - public static final DateFormatter OFFSET_TIME_FORMATTER = DateFormatters.forPattern("HH:mm:ss.SSSZZZZZ"); + public static final DateFormatter DEFAULT_FORMATTER = DateFormatter.forPattern("strict_date_optional_time_nanos"); + public static final DateFormatter LOCAL_TIME_FORMATTER = DateFormatter.forPattern("HH:mm:ss.SSS"); + public static final DateFormatter OFFSET_TIME_FORMATTER = DateFormatter.forPattern("HH:mm:ss.SSSZZZZZ"); @Override public Map, XContentBuilder.Writer> getXContentWriters() { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java index 69b6a6e04a936..7a5bd97770297 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.mapper; import com.carrotsearch.hppc.ObjectArrayList; - import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; @@ -41,9 +40,9 @@ import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.search.DocValueFormat; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; import java.util.Base64; import java.util.List; import java.util.Map; @@ -108,7 +107,7 @@ public String typeName() { } @Override - public DocValueFormat docValueFormat(String format, DateTimeZone timeZone) { + public DocValueFormat docValueFormat(String format, ZoneId timeZone) { return DocValueFormat.BINARY; } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java index 9e0b9f62acbe7..caf8baac24da1 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BooleanFieldMapper.java @@ -40,9 +40,9 @@ import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.DocValueFormat; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -190,7 +190,7 @@ public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName) { } @Override - public DocValueFormat docValueFormat(@Nullable String format, DateTimeZone timeZone) { + public DocValueFormat docValueFormat(@Nullable String format, ZoneId timeZone) { if (format != null) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] does not support custom formats"); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java index 1e17aab31605b..0dcf52d5e54f2 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DateFieldMapper.java @@ -33,13 +33,15 @@ import org.apache.lucene.search.Query; 
import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.time.DateMathParser; -import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.common.util.LocaleUtils; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -49,18 +51,17 @@ import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.DocValueFormat; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; +import java.time.ZoneOffset; import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; -import static org.elasticsearch.index.mapper.TypeParsers.parseDateTimeFormatter; - -/** A {@link FieldMapper} for ip addresses. */ +/** A {@link FieldMapper} for dates. */ public class DateFieldMapper extends FieldMapper { public static final String CONTENT_TYPE = "date"; @@ -73,8 +74,8 @@ public static class Defaults { public static class Builder extends FieldMapper.Builder { private Boolean ignoreMalformed; + private Explicit format = new Explicit<>(DEFAULT_DATE_TIME_FORMATTER.pattern(), false); private Locale locale; - private boolean dateTimeFormatterSet = false; public Builder(String name) { super(name, new DateFieldType(), new DateFieldType()); @@ -102,27 +103,37 @@ protected Explicit ignoreMalformed(BuilderContext context) { return Defaults.IGNORE_MALFORMED; } - /** Whether an explicit format for this date field has been set already. 
*/ - public boolean isDateTimeFormatterSet() { - return dateTimeFormatterSet; + public Builder locale(Locale locale) { + this.locale = locale; + return this; + } + + public Locale locale() { + return locale; } - public Builder dateTimeFormatter(DateFormatter dateTimeFormatter) { - fieldType().setDateTimeFormatter(dateTimeFormatter); - dateTimeFormatterSet = true; + public String format() { + return format.value(); + } + + public Builder format(String format) { + this.format = new Explicit<>(format, true); return this; } - public void locale(Locale locale) { - this.locale = locale; + public boolean isFormatterSet() { + return format.explicit(); } @Override protected void setupFieldType(BuilderContext context) { super.setupFieldType(context); + String pattern = this.format.value(); DateFormatter dateTimeFormatter = fieldType().dateTimeFormatter; - if (!locale.equals(dateTimeFormatter.locale())) { - fieldType().setDateTimeFormatter(dateTimeFormatter.withLocale(locale)); + + boolean hasPatternChanged = Strings.hasLength(pattern) && Objects.equals(pattern, dateTimeFormatter.pattern()) == false; + if (hasPatternChanged || Objects.equals(builder.locale, dateTimeFormatter.locale()) == false) { + fieldType().setDateTimeFormatter(DateFormatter.forPattern(pattern).withLocale(locale)); } } @@ -160,7 +171,7 @@ public Mapper.Builder parse(String name, Map node, ParserCo builder.locale(LocaleUtils.parse(propNode.toString())); iterator.remove(); } else if (propName.equals("format")) { - builder.dateTimeFormatter(parseDateTimeFormatter(propNode)); + builder.format(propNode.toString()); iterator.remove(); } else if (TypeParsers.parseMultiField(builder, name, parserContext, propName, propNode)) { iterator.remove(); @@ -196,13 +207,12 @@ public MappedFieldType clone() { public boolean equals(Object o) { if (!super.equals(o)) return false; DateFieldType that = (DateFieldType) o; - return Objects.equals(dateTimeFormatter.pattern(), that.dateTimeFormatter.pattern()) && - Objects.equals(dateTimeFormatter.locale(), that.dateTimeFormatter.locale()); + return Objects.equals(dateTimeFormatter, that.dateTimeFormatter); } @Override public int hashCode() { - return Objects.hash(super.hashCode(), dateTimeFormatter.pattern(), dateTimeFormatter.locale()); + return Objects.hash(super.hashCode(), dateTimeFormatter); } @Override @@ -214,10 +224,10 @@ public String typeName() { public void checkCompatibility(MappedFieldType fieldType, List conflicts) { super.checkCompatibility(fieldType, conflicts); DateFieldType other = (DateFieldType) fieldType; - if (Objects.equals(dateTimeFormatter().pattern(), other.dateTimeFormatter().pattern()) == false) { + if (Objects.equals(dateTimeFormatter.pattern(), other.dateTimeFormatter.pattern()) == false) { conflicts.add("mapper [" + name() + "] has different [format] values"); } - if (Objects.equals(dateTimeFormatter().locale(), other.dateTimeFormatter().locale()) == false) { + if (Objects.equals(dateTimeFormatter.locale(), other.dateTimeFormatter.locale()) == false) { conflicts.add("mapper [" + name() + "] has different [locale] values"); } } @@ -226,9 +236,9 @@ public DateFormatter dateTimeFormatter() { return dateTimeFormatter; } - public void setDateTimeFormatter(DateFormatter dateTimeFormatter) { + void setDateTimeFormatter(DateFormatter formatter) { checkIfFrozen(); - this.dateTimeFormatter = dateTimeFormatter; + this.dateTimeFormatter = formatter; this.dateMathParser = dateTimeFormatter.toDateMathParser(); } @@ -237,7 +247,7 @@ protected DateMathParser dateMathParser() { } long 
parse(String value) { - return dateTimeFormatter().parseMillis(value); + return DateFormatters.toZonedDateTime(dateTimeFormatter().parse(value)).toInstant().toEpochMilli(); } @Override @@ -260,7 +270,7 @@ public Query termQuery(Object value, @Nullable QueryShardContext context) { @Override public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, ShapeRelation relation, - @Nullable DateTimeZone timeZone, @Nullable DateMathParser forcedDateParser, QueryShardContext context) { + @Nullable ZoneId timeZone, @Nullable DateMathParser forcedDateParser, QueryShardContext context) { failIfNotIndexed(); if (relation == ShapeRelation.DISJOINT) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + @@ -294,8 +304,8 @@ public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower return query; } - public long parseToMilliseconds(Object value, boolean roundUp, @Nullable DateTimeZone zone, - @Nullable DateMathParser forcedDateParser, QueryRewriteContext context) { + public long parseToMilliseconds(Object value, boolean roundUp, + @Nullable ZoneId zone, @Nullable DateMathParser forcedDateParser, QueryRewriteContext context) { DateMathParser dateParser = dateMathParser(); if (forcedDateParser != null) { dateParser = forcedDateParser; @@ -307,13 +317,13 @@ public long parseToMilliseconds(Object value, boolean roundUp, @Nullable DateTim } else { strValue = value.toString(); } - return dateParser.parse(strValue, context::nowInMillis, roundUp, DateUtils.dateTimeZoneToZoneId(zone)); + return dateParser.parse(strValue, context::nowInMillis, roundUp, zone).toEpochMilli(); } @Override - public Relation isFieldWithinQuery(IndexReader reader, Object from, Object to, boolean includeLower, boolean includeUpper, - DateTimeZone timeZone, DateMathParser dateParser, - QueryRewriteContext context) throws IOException { + public Relation isFieldWithinQuery(IndexReader reader, + Object from, Object to, boolean includeLower, boolean includeUpper, + ZoneId timeZone, DateMathParser dateParser, QueryRewriteContext context) throws IOException { if (dateParser == null) { dateParser = this.dateMathParser; } @@ -376,13 +386,13 @@ public Object valueForDisplay(Object value) { } @Override - public DocValueFormat docValueFormat(@Nullable String format, DateTimeZone timeZone) { + public DocValueFormat docValueFormat(@Nullable String format, ZoneId timeZone) { DateFormatter dateTimeFormatter = this.dateTimeFormatter; if (format != null) { - dateTimeFormatter = DateFormatter.forPattern(format); + dateTimeFormatter = DateFormatter.forPattern(format).withLocale(dateTimeFormatter.locale()); } if (timeZone == null) { - timeZone = DateTimeZone.UTC; + timeZone = ZoneOffset.UTC; } return new DocValueFormat.DateTime(dateTimeFormatter, timeZone); } @@ -442,7 +452,7 @@ protected void parseCreateField(ParseContext context, List field long timestamp; try { timestamp = fieldType().parse(dateAsString); - } catch (IllegalArgumentException e) { + } catch (IllegalArgumentException | ElasticsearchParseException e) { if (ignoreMalformed.value()) { context.addIgnoredField(fieldType.name()); return; @@ -489,8 +499,9 @@ protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, || fieldType().dateTimeFormatter().pattern().equals(DEFAULT_DATE_TIME_FORMATTER.pattern()) == false) { builder.field("format", fieldType().dateTimeFormatter().pattern()); } + if (includeDefaults - || fieldType().dateTimeFormatter().locale() != Locale.ROOT) { + || 
fieldType().dateTimeFormatter().locale().equals(DEFAULT_DATE_TIME_FORMATTER.locale()) == false) { builder.field("locale", fieldType().dateTimeFormatter().locale()); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 54e59691f80d5..fe2bd6e9eed59 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -21,6 +21,7 @@ import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexableField; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; @@ -35,6 +36,7 @@ import org.elasticsearch.index.mapper.TextFieldMapper.TextFieldType; import java.io.IOException; +import java.time.format.DateTimeParseException; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -672,7 +674,7 @@ private static Mapper.Builder createBuilderFromFieldType(final ParseContext private static Mapper.Builder newDateBuilder(String name, DateFormatter dateTimeFormatter, Version indexCreated) { DateFieldMapper.Builder builder = new DateFieldMapper.Builder(name); if (dateTimeFormatter != null) { - builder.dateTimeFormatter(dateTimeFormatter); + builder.format(dateTimeFormatter.pattern()).locale(dateTimeFormatter.locale()); } return builder; } @@ -717,8 +719,8 @@ private static Mapper.Builder createBuilderFromDynamicValue(final ParseCont // `epoch_millis` or `YYYY` for (DateFormatter dateTimeFormatter : context.root().dynamicDateTimeFormatters()) { try { - dateTimeFormatter.parseMillis(text); - } catch (IllegalArgumentException e) { + dateTimeFormatter.parse(text); + } catch (ElasticsearchParseException | DateTimeParseException | IllegalArgumentException e) { // failure to parse this, continue continue; } @@ -728,8 +730,8 @@ private static Mapper.Builder createBuilderFromDynamicValue(final ParseCont } if (builder instanceof DateFieldMapper.Builder) { DateFieldMapper.Builder dateBuilder = (DateFieldMapper.Builder) builder; - if (dateBuilder.isDateTimeFormatterSet() == false) { - dateBuilder.dateTimeFormatter(dateTimeFormatter); + if (dateBuilder.isFormatterSet() == false) { + dateBuilder.format(dateTimeFormatter.pattern()).locale(dateTimeFormatter.locale()); } } return builder; diff --git a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java index a8ef46b93060e..2b52e42ffe558 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/IpFieldMapper.java @@ -44,10 +44,10 @@ import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.DocValueFormat; -import org.joda.time.DateTimeZone; import java.io.IOException; import java.net.InetAddress; +import java.time.ZoneId; import java.util.Arrays; import java.util.Iterator; import java.util.List; @@ -303,7 +303,7 @@ public Object valueForDisplay(Object value) { } @Override - public DocValueFormat docValueFormat(@Nullable String format, DateTimeZone timeZone) { + public DocValueFormat docValueFormat(@Nullable String format, ZoneId timeZone) { if (format != null) { throw new IllegalArgumentException("Field [" + name() + "] of type 
[" + typeName() + "] does not support custom formats"); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java index f785e01125f69..5ef689709400d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappedFieldType.java @@ -50,9 +50,9 @@ import org.elasticsearch.index.query.QueryShardException; import org.elasticsearch.index.similarity.SimilarityProvider; import org.elasticsearch.search.DocValueFormat; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; import java.util.List; import java.util.Objects; @@ -335,10 +335,10 @@ public Query termsQuery(List values, @Nullable QueryShardContext context) { * @param relation the relation, nulls should be interpreted like INTERSECTS */ public Query rangeQuery( - Object lowerTerm, Object upperTerm, - boolean includeLower, boolean includeUpper, - ShapeRelation relation, DateTimeZone timeZone, DateMathParser parser, - QueryShardContext context) { + Object lowerTerm, Object upperTerm, + boolean includeLower, boolean includeUpper, + ShapeRelation relation, ZoneId timeZone, DateMathParser parser, + QueryShardContext context) { throw new IllegalArgumentException("Field [" + name + "] of type [" + typeName() + "] does not support range queries"); } @@ -413,7 +413,7 @@ public Relation isFieldWithinQuery( IndexReader reader, Object from, Object to, boolean includeLower, boolean includeUpper, - DateTimeZone timeZone, DateMathParser dateMathParser, QueryRewriteContext context) throws IOException { + ZoneId timeZone, DateMathParser dateMathParser, QueryRewriteContext context) throws IOException { return Relation.INTERSECTS; } @@ -448,7 +448,7 @@ public void setEagerGlobalOrdinals(boolean eagerGlobalOrdinals) { /** Return a {@link DocValueFormat} that can be used to display and parse * values as returned by the fielddata API. * The default implementation returns a {@link DocValueFormat#RAW}. 
*/ - public DocValueFormat docValueFormat(@Nullable String format, DateTimeZone timeZone) { + public DocValueFormat docValueFormat(@Nullable String format, ZoneId timeZone) { if (format != null) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] does not support custom formats"); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java index 8d9a688776548..06e12ca8b5e4c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/NumberFieldMapper.java @@ -53,9 +53,9 @@ import org.elasticsearch.index.fielddata.plain.DocValuesIndexFieldData; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.DocValueFormat; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; import java.util.ArrayList; import java.util.Arrays; import java.util.Iterator; @@ -961,7 +961,7 @@ public Object valueForDisplay(Object value) { } @Override - public DocValueFormat docValueFormat(String format, DateTimeZone timeZone) { + public DocValueFormat docValueFormat(String format, ZoneId timeZone) { if (timeZone != null) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] does not support custom time zones"); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java index d93c909ff8445..e5ba55de7bfd0 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RangeFieldMapper.java @@ -42,6 +42,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.network.InetAddresses; @@ -49,19 +50,18 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.time.DateMathParser; -import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.common.util.LocaleUtils; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.mapper.NumberFieldMapper.NumberType; import org.elasticsearch.index.query.QueryShardContext; -import org.joda.time.DateTimeZone; import java.io.IOException; import java.net.InetAddress; import java.net.UnknownHostException; import java.time.ZoneId; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.HashSet; import java.util.Iterator; @@ -71,7 +71,6 @@ import java.util.Objects; import java.util.Set; -import static org.elasticsearch.index.mapper.TypeParsers.parseDateTimeFormatter; import static org.elasticsearch.index.query.RangeQueryBuilder.GTE_FIELD; import static org.elasticsearch.index.query.RangeQueryBuilder.GT_FIELD; import static org.elasticsearch.index.query.RangeQueryBuilder.LTE_FIELD; @@ -92,12 +91,12 @@ public static class Defaults { public static class Builder extends FieldMapper.Builder { private Boolean coerce; - private Locale locale; + private Locale locale = 
Locale.ROOT; + private String pattern; public Builder(String name, RangeType type) { super(name, new RangeFieldType(type), new RangeFieldType(type)); builder = this; - locale = Locale.ROOT; } @Override @@ -128,8 +127,8 @@ protected Explicit coerce(BuilderContext context) { return Defaults.COERCE; } - public Builder dateTimeFormatter(DateFormatter dateTimeFormatter) { - fieldType().setDateTimeFormatter(dateTimeFormatter); + public Builder format(String format) { + this.pattern = format; return this; } @@ -145,12 +144,15 @@ public void locale(Locale locale) { @Override protected void setupFieldType(BuilderContext context) { super.setupFieldType(context); - DateFormatter dateTimeFormatter = fieldType().dateTimeFormatter; + DateFormatter formatter = fieldType().dateTimeFormatter; if (fieldType().rangeType == RangeType.DATE) { - if (!locale.equals(dateTimeFormatter.locale())) { - fieldType().setDateTimeFormatter(dateTimeFormatter.withLocale(locale)); + boolean hasPatternChanged = Strings.hasLength(builder.pattern) && + Objects.equals(builder.pattern, formatter.pattern()) == false; + + if (hasPatternChanged || Objects.equals(builder.locale, formatter.locale()) == false) { + fieldType().setDateTimeFormatter(DateFormatter.forPattern(pattern).withLocale(locale)); } - } else if (dateTimeFormatter != null) { + } else if (pattern != null) { throw new IllegalArgumentException("field [" + name() + "] of type [" + fieldType().rangeType + "] should not define a dateTimeFormatter unless it is a " + RangeType.DATE + " type"); } @@ -190,7 +192,7 @@ public Mapper.Builder parse(String name, Map node, builder.locale(LocaleUtils.parse(propNode.toString())); iterator.remove(); } else if (propName.equals("format")) { - builder.dateTimeFormatter(parseDateTimeFormatter(propNode)); + builder.format(propNode.toString()); iterator.remove(); } else if (TypeParsers.parseMultiField(builder, name, parserContext, propName, propNode)) { iterator.remove(); @@ -219,8 +221,8 @@ public static final class RangeFieldType extends MappedFieldType { RangeFieldType(RangeFieldType other) { super(other); this.rangeType = other.rangeType; - if (other.dateTimeFormatter() != null) { - setDateTimeFormatter(other.dateTimeFormatter); + if (other.rangeType == RangeType.DATE && other.dateTimeFormatter() != null) { + setDateTimeFormatter(other.dateTimeFormatter()); } } @@ -235,15 +237,13 @@ public boolean equals(Object o) { RangeFieldType that = (RangeFieldType) o; return Objects.equals(rangeType, that.rangeType) && (rangeType == RangeType.DATE) ? - Objects.equals(dateTimeFormatter.pattern(), that.dateTimeFormatter.pattern()) - && Objects.equals(dateTimeFormatter.locale(), that.dateTimeFormatter.locale()) + Objects.equals(dateTimeFormatter, that.dateTimeFormatter) : dateTimeFormatter == null && that.dateTimeFormatter == null; } @Override public int hashCode() { - return (dateTimeFormatter == null) ? 
Objects.hash(super.hashCode(), rangeType) - : Objects.hash(super.hashCode(), rangeType, dateTimeFormatter.pattern(), dateTimeFormatter.locale()); + return Objects.hash(super.hashCode(), rangeType, dateTimeFormatter); } @Override @@ -285,7 +285,7 @@ public Query termQuery(Object value, QueryShardContext context) { @Override public Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, - ShapeRelation relation, DateTimeZone timeZone, DateMathParser parser, QueryShardContext context) { + ShapeRelation relation, ZoneId timeZone, DateMathParser parser, QueryShardContext context) { failIfNotIndexed(); if (parser == null) { parser = dateMathParser(); @@ -543,7 +543,8 @@ public Field getRangeField(String name, Range r) { return new LongRange(name, new long[] {((Number)r.from).longValue()}, new long[] {((Number)r.to).longValue()}); } private Number parse(DateMathParser dateMathParser, String dateStr) { - return dateMathParser.parse(dateStr, () -> {throw new IllegalArgumentException("now is not used at indexing time");}); + return dateMathParser.parse(dateStr, () -> {throw new IllegalArgumentException("now is not used at indexing time");}) + .toEpochMilli(); } @Override public Number parseFrom(RangeFieldType fieldType, XContentParser parser, boolean coerce, boolean included) @@ -586,18 +587,18 @@ public Query dvRangeQuery(String field, QueryType queryType, Object from, Object @Override public Query rangeQuery(String field, boolean hasDocValues, Object lowerTerm, Object upperTerm, boolean includeLower, - boolean includeUpper, ShapeRelation relation, @Nullable DateTimeZone timeZone, + boolean includeUpper, ShapeRelation relation, @Nullable ZoneId timeZone, @Nullable DateMathParser parser, QueryShardContext context) { - DateTimeZone zone = (timeZone == null) ? DateTimeZone.UTC : timeZone; - ZoneId zoneId = DateUtils.dateTimeZoneToZoneId(zone); + ZoneId zone = (timeZone == null) ? ZoneOffset.UTC : timeZone; + DateMathParser dateMathParser = (parser == null) ? DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.toDateMathParser() : parser; Long low = lowerTerm == null ? Long.MIN_VALUE : dateMathParser.parse(lowerTerm instanceof BytesRef ? ((BytesRef) lowerTerm).utf8ToString() : lowerTerm.toString(), - context::nowInMillis, false, zoneId); + context::nowInMillis, false, zone).toEpochMilli(); Long high = upperTerm == null ? Long.MAX_VALUE : dateMathParser.parse(upperTerm instanceof BytesRef ? ((BytesRef) upperTerm).utf8ToString() : upperTerm.toString(), - context::nowInMillis, false, zoneId); + context::nowInMillis, false, zone).toEpochMilli(); return super.rangeQuery(field, hasDocValues, low, high, includeLower, includeUpper, relation, zone, dateMathParser, context); @@ -910,7 +911,7 @@ public Object parse(Object value, boolean coerce) { return numberType.parse(value, coerce); } public Query rangeQuery(String field, boolean hasDocValues, Object from, Object to, boolean includeFrom, boolean includeTo, - ShapeRelation relation, @Nullable DateTimeZone timeZone, @Nullable DateMathParser dateMathParser, + ShapeRelation relation, @Nullable ZoneId timeZone, @Nullable DateMathParser dateMathParser, QueryShardContext context) { Object lower = from == null ? minValue() : parse(from, false); Object upper = to == null ? 
maxValue() : parse(to, false); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java index f35e6126dbe71..6d2f0fddd86c2 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/RootObjectMapper.java @@ -22,7 +22,6 @@ import org.elasticsearch.Version; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.joda.Joda; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.xcontent.ToXContent; @@ -46,7 +45,7 @@ public static class Defaults { public static final DateFormatter[] DYNAMIC_DATE_TIME_FORMATTERS = new DateFormatter[]{ DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, - Joda.getStrictStandardDateFormatter() + DateFormatter.forPattern("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd||epoch_millis") }; public static final boolean DATE_DETECTION = true; public static final boolean NUMERIC_DETECTION = false; @@ -55,8 +54,7 @@ public static class Defaults { public static class Builder extends ObjectMapper.Builder { protected Explicit dynamicTemplates = new Explicit<>(new DynamicTemplate[0], false); - protected Explicit dynamicDateTimeFormatters = - new Explicit<>(Defaults.DYNAMIC_DATE_TIME_FORMATTERS, false); + protected Explicit dynamicDateTimeFormatters = new Explicit<>(Defaults.DYNAMIC_DATE_TIME_FORMATTERS, false); protected Explicit dateDetection = new Explicit<>(Defaults.DATE_DETECTION, false); protected Explicit numericDetection = new Explicit<>(Defaults.NUMERIC_DETECTION, false); diff --git a/server/src/main/java/org/elasticsearch/index/mapper/SimpleMappedFieldType.java b/server/src/main/java/org/elasticsearch/index/mapper/SimpleMappedFieldType.java index 3d3b160787050..366eb3b36f0fe 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/SimpleMappedFieldType.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/SimpleMappedFieldType.java @@ -23,7 +23,8 @@ import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.time.DateMathParser; import org.elasticsearch.index.query.QueryShardContext; -import org.joda.time.DateTimeZone; + +import java.time.ZoneId; /** * {@link MappedFieldType} base impl for field types that are neither dates nor ranges. @@ -40,7 +41,7 @@ protected SimpleMappedFieldType(MappedFieldType ref) { @Override public final Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, - ShapeRelation relation, DateTimeZone timeZone, DateMathParser parser, QueryShardContext context) { + ShapeRelation relation, ZoneId timeZone, DateMathParser parser, QueryShardContext context) { if (relation == ShapeRelation.DISJOINT) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + "] does not support DISJOINT ranges"); @@ -52,7 +53,7 @@ public final Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includ } /** - * Same as {@link #rangeQuery(Object, Object, boolean, boolean, ShapeRelation, DateTimeZone, DateMathParser, QueryShardContext)} + * Same as {@link #rangeQuery(Object, Object, boolean, boolean, ShapeRelation, ZoneId, DateMathParser, QueryShardContext)} * but without the trouble of relations or date-specific options. 
*/ protected Query rangeQuery(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java b/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java index d93caf3c4e8d1..8cf66009ea140 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TypeParsers.java @@ -43,7 +43,6 @@ public class TypeParsers { public static final String INDEX_OPTIONS_POSITIONS = "positions"; public static final String INDEX_OPTIONS_OFFSETS = "offsets"; - private static void parseAnalyzersAndTermVectors(FieldMapper.Builder builder, String name, Map fieldNode, Mapper.TypeParser.ParserContext parserContext) { NamedAnalyzer indexAnalyzer = null; diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java index 6ae9055efcefc..363384030a2ac 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java @@ -38,9 +38,9 @@ import org.elasticsearch.index.query.support.QueryParsers; import org.elasticsearch.index.search.QueryParserHelper; import org.elasticsearch.index.search.QueryStringQueryParser; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; import java.util.ArrayList; import java.util.List; import java.util.Locale; @@ -144,7 +144,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder i private static final ParseField RELATION_FIELD = new ParseField("relation"); private final String fieldName; - private Object from; - private Object to; - - private DateTimeZone timeZone; - + private ZoneId timeZone; private boolean includeLower = DEFAULT_INCLUDE_LOWER; - private boolean includeUpper = DEFAULT_INCLUDE_UPPER; - - private DateFormatter format; - + private String format; private ShapeRelation relation; /** @@ -101,11 +95,8 @@ public RangeQueryBuilder(StreamInput in) throws IOException { to = in.readGenericValue(); includeLower = in.readBoolean(); includeUpper = in.readBoolean(); - timeZone = in.readOptionalTimeZone(); - String formatString = in.readOptionalString(); - if (formatString != null) { - format = DateFormatter.forPattern(formatString); - } + timeZone = in.readOptionalZoneId(); + format = in.readOptionalString(); String relationString = in.readOptionalString(); if (relationString != null) { relation = ShapeRelation.getRelationByName(relationString); @@ -129,12 +120,8 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeGenericValue(this.to); out.writeBoolean(this.includeLower); out.writeBoolean(this.includeUpper); - out.writeOptionalTimeZone(timeZone); - String formatString = null; - if (this.format != null) { - formatString = this.format.pattern(); - } - out.writeOptionalString(formatString); + out.writeOptionalZoneId(timeZone); + out.writeOptionalString(format); String relationString = null; if (this.relation != null) { relationString = this.relation.getRelationName(); @@ -267,7 +254,11 @@ public RangeQueryBuilder timeZone(String timeZone) { if (timeZone == null) { throw new IllegalArgumentException("timezone cannot be null"); } - this.timeZone = DateTimeZone.forID(timeZone); + try { + this.timeZone = ZoneId.of(timeZone); + } catch (DateTimeException e) { + throw new IllegalArgumentException(e); + } return this; } 
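[editor's note] The RangeQueryBuilder hunk above moves the stored timezone from Joda's DateTimeZone to java.time's ZoneId, and its timeZone(String) setter rewraps the DateTimeException thrown by ZoneId.of into the IllegalArgumentException callers already expect. A standalone sketch of that validation pattern follows; the class and method names are hypothetical.

import java.time.DateTimeException;
import java.time.ZoneId;

public class ZoneIdValidation {
    // Mirrors the rewrapping done in RangeQueryBuilder#timeZone(String):
    // java.time signals unknown or malformed ids with DateTimeException,
    // while the builder's existing contract is IllegalArgumentException.
    static ZoneId toZoneId(String timeZone) {
        if (timeZone == null) {
            throw new IllegalArgumentException("timezone cannot be null");
        }
        try {
            return ZoneId.of(timeZone);
        } catch (DateTimeException e) {
            throw new IllegalArgumentException(e);
        }
    }

    public static void main(String[] args) {
        System.out.println(toZoneId("Europe/Paris")); // region id parses
        System.out.println(toZoneId("+05:30"));       // offset id parses
        toZoneId("CEST");                             // unknown id -> IllegalArgumentException
    }
}

The same reasoning explains why format(String) now keeps the raw pattern string and calls DateFormatter.forPattern(format) purely for eager validation: the builder stays serialization-friendly while still rejecting bad input at set time.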
@@ -275,10 +266,10 @@ public RangeQueryBuilder timeZone(String timeZone) { * In case of date field, gets the from/to fields timezone adjustment */ public String timeZone() { - return this.timeZone == null ? null : this.timeZone.getID(); + return this.timeZone == null ? null : this.timeZone.getId(); } - DateTimeZone getDateTimeZone() { // for testing + ZoneId getDateTimeZone() { // for testing return timeZone; } @@ -289,7 +280,9 @@ public RangeQueryBuilder format(String format) { if (format == null) { throw new IllegalArgumentException("format cannot be null"); } - this.format = DateFormatter.forPattern(format); + // this just ensure that the pattern is actually valid, no need to keep it here + DateFormatter.forPattern(format); + this.format = format; return this; } @@ -297,12 +290,12 @@ public RangeQueryBuilder format(String format) { * Gets the format field to parse the from/to fields */ public String format() { - return this.format == null ? null : this.format.pattern(); + return format; } DateMathParser getForceDateParser() { // pkg private for testing - if (this.format != null) { - return this.format.toDateMathParser(); + if (Strings.hasText(format)) { + return DateFormatter.forPattern(this.format).toDateMathParser(); } return null; } @@ -334,10 +327,10 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep builder.field(INCLUDE_LOWER_FIELD.getPreferredName(), includeLower); builder.field(INCLUDE_UPPER_FIELD.getPreferredName(), includeUpper); if (timeZone != null) { - builder.field(TIME_ZONE_FIELD.getPreferredName(), timeZone.getID()); + builder.field(TIME_ZONE_FIELD.getPreferredName(), timeZone.getId()); } - if (format != null) { - builder.field(FORMAT_FIELD.getPreferredName(), format.pattern()); + if (Strings.hasText(format)) { + builder.field(FORMAT_FIELD.getPreferredName(), format); } if (relation != null) { builder.field(RELATION_FIELD.getPreferredName(), relation.getRelationName()); @@ -531,21 +524,17 @@ protected Query doToQuery(QueryShardContext context) throws IOException { @Override protected int doHashCode() { - String timeZoneId = timeZone == null ? null : timeZone.getID(); - String formatString = format == null ? null : format.pattern(); - return Objects.hash(fieldName, from, to, timeZoneId, includeLower, includeUpper, formatString); + return Objects.hash(fieldName, from, to, timeZone, includeLower, includeUpper, format); } @Override protected boolean doEquals(RangeQueryBuilder other) { - String timeZoneId = timeZone == null ? null : timeZone.getID(); - String formatString = format == null ? 
null : format.pattern(); return Objects.equals(fieldName, other.fieldName) && Objects.equals(from, other.from) && Objects.equals(to, other.to) && - Objects.equals(timeZoneId, other.timeZone()) && + Objects.equals(timeZone, other.timeZone) && Objects.equals(includeLower, other.includeLower) && Objects.equals(includeUpper, other.includeUpper) && - Objects.equals(formatString, other.format()); + Objects.equals(format, other.format); } } diff --git a/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java b/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java index 4974ef9277e9a..dc5354c7e0522 100644 --- a/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java +++ b/server/src/main/java/org/elasticsearch/index/search/QueryStringQueryParser.java @@ -54,9 +54,9 @@ import org.elasticsearch.index.query.MultiMatchQueryBuilder; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.support.QueryParsers; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -89,7 +89,7 @@ public class QueryStringQueryParser extends XQueryParser { private Analyzer forceQuoteAnalyzer; private String quoteFieldSuffix; private boolean analyzeWildcard; - private DateTimeZone timeZone; + private ZoneId timeZone; private Fuzziness fuzziness = Fuzziness.AUTO; private int fuzzyMaxExpansions = FuzzyQuery.defaultMaxExpansions; private MappedFieldType currentFieldType; @@ -227,7 +227,7 @@ public void setAnalyzeWildcard(boolean analyzeWildcard) { /** * @param timeZone Time Zone to be applied to any range query related to dates. */ - public void setTimeZone(DateTimeZone timeZone) { + public void setTimeZone(ZoneId timeZone) { this.timeZone = timeZone; } diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java b/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java index 719558edbf748..90ebc8e074108 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java @@ -19,9 +19,6 @@ package org.elasticsearch.ingest; -import java.util.Collections; -import java.util.IdentityHashMap; -import java.util.Set; import org.elasticsearch.common.Strings; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.mapper.IdFieldMapper; @@ -37,12 +34,15 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Base64; +import java.util.Collections; import java.util.Date; import java.util.EnumMap; import java.util.HashMap; +import java.util.IdentityHashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; /** * Represents a single document being captured before indexing and holds the source and metadata (like id, type and index). 
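[Editor's note — an illustrative sketch, not code from this patch. The RangeQueryBuilder changes above replace the stored DateFormatter with the raw pattern String: the setter still calls DateFormatter.forPattern once so an invalid pattern fails fast, but the formatter and its DateMathParser are rebuilt only when actually needed, which also lets hashCode/equals and serialization work on the plain String. A rough sketch of that validate-early, materialize-late shape, with a hypothetical PatternHolder class standing in for the builder:

    import org.elasticsearch.common.Strings;
    import org.elasticsearch.common.time.DateFormatter;
    import org.elasticsearch.common.time.DateMathParser;

    class PatternHolder {
        private String format; // only the string is serialized and compared

        void format(String format) {
            DateFormatter.forPattern(format); // validate now, discard the instance
            this.format = format;
        }

        DateMathParser parser() {
            // materialize lazily, as getForceDateParser() does above
            return Strings.hasText(format) ? DateFormatter.forPattern(format).toDateMathParser() : null;
        }
    }
]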
diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java b/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java index 5b55c00875d47..7bf26fd5e57a4 100644 --- a/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java @@ -22,7 +22,6 @@ import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.time.DateFormatter; -import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.unit.TimeValue; import java.lang.management.ManagementFactory; @@ -43,7 +42,7 @@ public class HotThreads { private static final Object mutex = new Object(); - private static final DateFormatter DATE_TIME_FORMATTER = DateFormatters.forPattern("dateOptionalTime"); + private static final DateFormatter DATE_TIME_FORMATTER = DateFormatter.forPattern("dateOptionalTime"); private int busiestThreads = 3; private TimeValue interval = new TimeValue(500, TimeUnit.MILLISECONDS); diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java index 676f2bbdc7b2e..bb449d584b2c8 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestIndicesAction.java @@ -39,7 +39,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.Table; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.time.DateFormatters; +import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.rest.RestController; @@ -61,6 +61,7 @@ public class RestIndicesAction extends AbstractCatAction { + private static final DateFormatter STRICT_DATE_TIME_FORMATTER = DateFormatter.forPattern("strict_date_time"); private final IndexNameExpressionResolver indexNameExpressionResolver; public RestIndicesAction(Settings settings, RestController controller, IndexNameExpressionResolver indexNameExpressionResolver) { @@ -432,7 +433,7 @@ Table buildTable(RestRequest request, Index[] indices, ClusterHealthResponse res table.addCell(indexMetaData.getCreationDate()); ZonedDateTime creationTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(indexMetaData.getCreationDate()), ZoneOffset.UTC); - table.addCell(DateFormatters.forPattern("strict_date_time").format(creationTime)); + table.addCell(STRICT_DATE_TIME_FORMATTER.format(creationTime)); table.addCell(totalStats.getStore() == null ? null : totalStats.getStore().size()); table.addCell(primaryStats.getStore() == null ? 
null : primaryStats.getStore().size()); diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java index fb302b1b3b3a4..22258ce2d8878 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.Table; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; -import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; @@ -99,7 +98,7 @@ protected Table getTableWithHeader(RestRequest request) { .endHeaders(); } - private static final DateFormatter FORMATTER = DateFormatters.forPattern("HH:mm:ss").withZone(ZoneOffset.UTC); + private static final DateFormatter FORMATTER = DateFormatter.forPattern("HH:mm:ss").withZone(ZoneOffset.UTC); private Table buildTable(RestRequest req, GetSnapshotsResponse getSnapshotsResponse) { Table table = getTableWithHeader(req); diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestTasksAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestTasksAction.java index 39b3f08dcdc5f..573eac6c04941 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestTasksAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestTasksAction.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.Table; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; -import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; @@ -125,7 +124,7 @@ protected Table getTableWithHeader(final RestRequest request) { return table; } - private static final DateFormatter FORMATTER = DateFormatters.forPattern("HH:mm:ss").withZone(ZoneOffset.UTC); + private static final DateFormatter FORMATTER = DateFormatter.forPattern("HH:mm:ss").withZone(ZoneOffset.UTC); private void buildRow(Table table, boolean fullId, boolean detailed, DiscoveryNodes discoveryNodes, TaskInfo taskInfo) { table.startRow(); diff --git a/server/src/main/java/org/elasticsearch/script/JodaCompatibleZonedDateTime.java b/server/src/main/java/org/elasticsearch/script/JodaCompatibleZonedDateTime.java index 546deb3a24b68..fc3816cad8a15 100644 --- a/server/src/main/java/org/elasticsearch/script/JodaCompatibleZonedDateTime.java +++ b/server/src/main/java/org/elasticsearch/script/JodaCompatibleZonedDateTime.java @@ -23,7 +23,6 @@ import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.time.DateFormatter; -import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.time.DateUtils; import org.joda.time.DateTime; @@ -50,7 +49,7 @@ * A wrapper around ZonedDateTime that exposes joda methods for backcompat. 
*/ public class JodaCompatibleZonedDateTime { - private static final DateFormatter DATE_FORMATTER = DateFormatters.forPattern("strict_date_time"); + private static final DateFormatter DATE_FORMATTER = DateFormatter.forPattern("strict_date_time"); private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(JodaCompatibleZonedDateTime.class)); diff --git a/server/src/main/java/org/elasticsearch/script/ScoreScriptUtils.java b/server/src/main/java/org/elasticsearch/script/ScoreScriptUtils.java index a911a4f197d67..753ef1fb23d85 100644 --- a/server/src/main/java/org/elasticsearch/script/ScoreScriptUtils.java +++ b/server/src/main/java/org/elasticsearch/script/ScoreScriptUtils.java @@ -212,7 +212,7 @@ public static final class DecayDateLinear { double scaling; public DecayDateLinear(String originStr, String scaleStr, String offsetStr, double decay) { - this.origin = dateParser.parse(originStr, null, false, defaultZoneId); + this.origin = dateParser.parse(originStr, null, false, defaultZoneId).toEpochMilli(); long scale = TimeValue.parseTimeValue(scaleStr, TimeValue.timeValueHours(24), getClass().getSimpleName() + ".scale") .getMillis(); this.offset = TimeValue.parseTimeValue(offsetStr, TimeValue.timeValueHours(24), getClass().getSimpleName() + ".offset") @@ -235,7 +235,7 @@ public static final class DecayDateExp { double scaling; public DecayDateExp(String originStr, String scaleStr, String offsetStr, double decay) { - this.origin = dateParser.parse(originStr, null, false, defaultZoneId); + this.origin = dateParser.parse(originStr, null, false, defaultZoneId).toEpochMilli(); long scale = TimeValue.parseTimeValue(scaleStr, TimeValue.timeValueHours(24), getClass().getSimpleName() + ".scale") .getMillis(); this.offset = TimeValue.parseTimeValue(offsetStr, TimeValue.timeValueHours(24), getClass().getSimpleName() + ".offset") @@ -258,7 +258,7 @@ public static final class DecayDateGauss { double scaling; public DecayDateGauss(String originStr, String scaleStr, String offsetStr, double decay) { - this.origin = dateParser.parse(originStr, null, false, defaultZoneId); + this.origin = dateParser.parse(originStr, null, false, defaultZoneId).toEpochMilli(); long scale = TimeValue.parseTimeValue(scaleStr, TimeValue.timeValueHours(24), getClass().getSimpleName() + ".scale") .getMillis(); this.offset = TimeValue.parseTimeValue(offsetStr, TimeValue.timeValueHours(24), getClass().getSimpleName() + ".offset") diff --git a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java index 900e1d7fd09ca..ceefe035d4613 100644 --- a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java +++ b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java @@ -21,6 +21,7 @@ import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.Version; import org.elasticsearch.common.geo.GeoHashUtils; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; @@ -30,7 +31,6 @@ import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.time.DateMathParser; import org.elasticsearch.common.time.DateUtils; -import org.joda.time.DateTimeZone; import java.io.IOException; import java.net.InetAddress; @@ -38,6 +38,7 @@ import java.text.DecimalFormatSymbols; import java.text.NumberFormat; import java.text.ParseException; +import java.time.Instant; import java.time.ZoneId; 
import java.util.Arrays; import java.util.Base64; @@ -164,20 +165,24 @@ final class DateTime implements DocValueFormat { public static final String NAME = "date_time"; final DateFormatter formatter; - // TODO: change this to ZoneId, but will require careful change to serialization - final DateTimeZone timeZone; - private final ZoneId zoneId; + final ZoneId timeZone; private final DateMathParser parser; - public DateTime(DateFormatter formatter, DateTimeZone timeZone) { - this.formatter = Objects.requireNonNull(formatter); + public DateTime(DateFormatter formatter, ZoneId timeZone) { + this.formatter = formatter; this.timeZone = Objects.requireNonNull(timeZone); - this.zoneId = DateUtils.dateTimeZoneToZoneId(timeZone); this.parser = formatter.toDateMathParser(); } public DateTime(StreamInput in) throws IOException { - this(DateFormatter.forPattern(in.readString()), DateTimeZone.forID(in.readString())); + this.formatter = DateFormatter.forPattern(in.readString()); + this.parser = formatter.toDateMathParser(); + String zoneId = in.readString(); + if (in.getVersion().before(Version.V_7_0_0)) { + this.timeZone = DateUtils.of(zoneId); + } else { + this.timeZone = ZoneId.of(zoneId); + } } @Override @@ -188,12 +193,16 @@ public String getWriteableName() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(formatter.pattern()); - out.writeString(timeZone.getID()); + if (out.getVersion().before(Version.V_7_0_0)) { + out.writeString(DateUtils.zoneIdToDateTimeZone(timeZone).getID()); + } else { + out.writeString(timeZone.getId()); + } } @Override public String format(long value) { - return formatter.withZone(zoneId).formatMillis(value); + return formatter.format(Instant.ofEpochMilli(value).atZone(timeZone)); } @Override @@ -203,7 +212,7 @@ public String format(double value) { @Override public long parseLong(String value, boolean roundUp, LongSupplier now) { - return parser.parse(value, now, roundUp, DateUtils.dateTimeZoneToZoneId(timeZone)); + return parser.parse(value, now, roundUp, timeZone).toEpochMilli(); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java index 28970ec828af9..53a7832884c76 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java @@ -19,11 +19,12 @@ package org.elasticsearch.search.aggregations.bucket.composite; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.rounding.DateTimeUnit; -import org.elasticsearch.common.rounding.Rounding; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -37,9 +38,10 @@ import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.internal.SearchContext; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; +import 
java.time.ZoneOffset; import java.util.Objects; import static org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder.DATE_FIELD_UNITS; @@ -70,9 +72,9 @@ public class DateHistogramValuesSourceBuilder extends CompositeValuesSourceBuild }, Histogram.INTERVAL_FIELD, ObjectParser.ValueType.LONG); PARSER.declareField(DateHistogramValuesSourceBuilder::timeZone, p -> { if (p.currentToken() == XContentParser.Token.VALUE_STRING) { - return DateTimeZone.forID(p.text()); + return ZoneId.of(p.text()); } else { - return DateTimeZone.forOffsetHours(p.intValue()); + return ZoneOffset.ofHours(p.intValue()); } }, new ParseField("time_zone"), ObjectParser.ValueType.LONG); CompositeValuesSourceParserHelper.declareValuesSourceFields(PARSER, ValueType.NUMERIC); @@ -82,7 +84,7 @@ static DateHistogramValuesSourceBuilder parse(String name, XContentParser parser } private long interval = 0; - private DateTimeZone timeZone = null; + private ZoneId timeZone = null; private DateHistogramInterval dateHistogramInterval; public DateHistogramValuesSourceBuilder(String name) { @@ -93,8 +95,10 @@ protected DateHistogramValuesSourceBuilder(StreamInput in) throws IOException { super(in); this.interval = in.readLong(); this.dateHistogramInterval = in.readOptionalWriteable(DateHistogramInterval::new); - if (in.readBoolean()) { - timeZone = DateTimeZone.forID(in.readString()); + if (in.getVersion().before(Version.V_7_0_0)) { + this.timeZone = DateUtils.dateTimeZoneToZoneId(in.readOptionalTimeZone()); + } else { + this.timeZone = in.readOptionalZoneId(); } } @@ -102,10 +106,10 @@ protected DateHistogramValuesSourceBuilder(StreamInput in) throws IOException { protected void innerWriteTo(StreamOutput out) throws IOException { out.writeLong(interval); out.writeOptionalWriteable(dateHistogramInterval); - boolean hasTimeZone = timeZone != null; - out.writeBoolean(hasTimeZone); - if (hasTimeZone) { - out.writeString(timeZone.getID()); + if (out.getVersion().before(Version.V_7_0_0)) { + out.writeOptionalTimeZone(DateUtils.zoneIdToDateTimeZone(timeZone)); + } else { + out.writeOptionalZoneId(timeZone); } } @@ -176,7 +180,7 @@ public DateHistogramValuesSourceBuilder dateHistogramInterval(DateHistogramInter /** * Sets the time zone to use for this aggregation */ - public DateHistogramValuesSourceBuilder timeZone(DateTimeZone timeZone) { + public DateHistogramValuesSourceBuilder timeZone(ZoneId timeZone) { if (timeZone == null) { throw new IllegalArgumentException("[timeZone] must not be null: [" + name + "]"); } @@ -187,14 +191,14 @@ public DateHistogramValuesSourceBuilder timeZone(DateTimeZone timeZone) { /** * Gets the time zone to use for this aggregation */ - public DateTimeZone timeZone() { + public ZoneId timeZone() { return timeZone; } private Rounding createRounding() { Rounding.Builder tzRoundingBuilder; if (dateHistogramInterval != null) { - DateTimeUnit dateTimeUnit = DATE_FIELD_UNITS.get(dateHistogramInterval.toString()); + Rounding.DateTimeUnit dateTimeUnit = DATE_FIELD_UNITS.get(dateHistogramInterval.toString()); if (dateTimeUnit != null) { tzRoundingBuilder = Rounding.builder(dateTimeUnit); } else { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/RoundingValuesSource.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/RoundingValuesSource.java index 635690c44f49e..9ee142fcd2fd5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/RoundingValuesSource.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/RoundingValuesSource.java @@ -21,7 +21,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; -import org.elasticsearch.common.rounding.Rounding; +import org.elasticsearch.common.Rounding; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; import org.elasticsearch.index.fielddata.SortedNumericDoubleValues; import org.elasticsearch.search.aggregations.support.ValuesSource; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java index 87ba80af9a4b0..794ce066ed76e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java @@ -20,11 +20,10 @@ package org.elasticsearch.search.aggregations.bucket.histogram; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.rounding.DateTimeUnit; -import org.elasticsearch.common.rounding.Rounding; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -42,9 +41,9 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; import org.elasticsearch.search.aggregations.support.ValuesSourceType; import org.elasticsearch.search.internal.SearchContext; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; import java.util.Arrays; import java.util.Map; import java.util.Objects; @@ -70,19 +69,19 @@ public class AutoDateHistogramAggregationBuilder * The current implementation probably should not be invoked in a tight loop. 
* @return Array of RoundingInfo */ - static RoundingInfo[] buildRoundings(DateTimeZone timeZone) { + static RoundingInfo[] buildRoundings(ZoneId timeZone) { RoundingInfo[] roundings = new RoundingInfo[6]; - roundings[0] = new RoundingInfo(createRounding(DateTimeUnit.SECOND_OF_MINUTE, timeZone), - 1000L, "s" , 1, 5, 10, 30); - roundings[1] = new RoundingInfo(createRounding(DateTimeUnit.MINUTES_OF_HOUR, timeZone), + roundings[0] = new RoundingInfo(createRounding(Rounding.DateTimeUnit.SECOND_OF_MINUTE, timeZone), + 1000L, "s", 1, 5, 10, 30); + roundings[1] = new RoundingInfo(createRounding(Rounding.DateTimeUnit.MINUTES_OF_HOUR, timeZone), 60 * 1000L, "m", 1, 5, 10, 30); - roundings[2] = new RoundingInfo(createRounding(DateTimeUnit.HOUR_OF_DAY, timeZone), - 60 * 60 * 1000L, "h", 1, 3, 12); - roundings[3] = new RoundingInfo(createRounding(DateTimeUnit.DAY_OF_MONTH, timeZone), + roundings[2] = new RoundingInfo(createRounding(Rounding.DateTimeUnit.HOUR_OF_DAY, timeZone), + 60 * 60 * 1000L, "h", 1, 3, 12); + roundings[3] = new RoundingInfo(createRounding(Rounding.DateTimeUnit.DAY_OF_MONTH, timeZone), 24 * 60 * 60 * 1000L, "d", 1, 7); - roundings[4] = new RoundingInfo(createRounding(DateTimeUnit.MONTH_OF_YEAR, timeZone), + roundings[4] = new RoundingInfo(createRounding(Rounding.DateTimeUnit.MONTH_OF_YEAR, timeZone), 30 * 24 * 60 * 60 * 1000L, "M", 1, 3); - roundings[5] = new RoundingInfo(createRounding(DateTimeUnit.YEAR_OF_CENTURY, timeZone), + roundings[5] = new RoundingInfo(createRounding(Rounding.DateTimeUnit.YEAR_OF_CENTURY, timeZone), 365 * 24 * 60 * 60 * 1000L, "y", 1, 5, 10, 20, 50, 100); return roundings; } @@ -156,7 +155,7 @@ public int getNumBuckets() { return new AutoDateHistogramAggregatorFactory(name, config, numBuckets, roundings, context, parent, subFactoriesBuilder, metaData); } - static Rounding createRounding(DateTimeUnit interval, DateTimeZone timeZone) { + static Rounding createRounding(Rounding.DateTimeUnit interval, ZoneId timeZone) { Rounding.Builder tzRoundingBuilder = Rounding.builder(interval); if (timeZone != null) { tzRoundingBuilder.timeZone(timeZone); @@ -196,7 +195,7 @@ public RoundingInfo(Rounding rounding, long roughEstimateDurationMillis, String } public RoundingInfo(StreamInput in) throws IOException { - rounding = Rounding.Streams.read(in); + rounding = Rounding.read(in); roughEstimateDurationMillis = in.readVLong(); innerIntervals = in.readIntArray(); unitAbbreviation = in.readString(); @@ -204,7 +203,7 @@ public RoundingInfo(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - Rounding.Streams.write(rounding, out); + rounding.writeTo(out); out.writeVLong(roughEstimateDurationMillis); out.writeIntArray(innerIntervals); out.writeString(unitAbbreviation); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java index 81bb70bd9672a..1b982ea9deca2 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregator.java @@ -23,8 +23,8 @@ import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Rounding; import org.elasticsearch.common.lease.Releasables; -import 
org.elasticsearch.common.rounding.Rounding; import org.elasticsearch.common.util.LongHash; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.Aggregator; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java index 5199313e0aca1..6d7852a864453 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java @@ -23,10 +23,9 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.search.DocIdSetIterator; +import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.rounding.DateTimeUnit; -import org.elasticsearch.common.rounding.Rounding; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.time.DateMathParser; import org.elasticsearch.common.unit.TimeValue; @@ -54,10 +53,12 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; import org.elasticsearch.search.aggregations.support.ValuesSourceType; import org.elasticsearch.search.internal.SearchContext; -import org.joda.time.DateTimeField; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.Instant; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.zone.ZoneOffsetTransition; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -70,29 +71,30 @@ */ public class DateHistogramAggregationBuilder extends ValuesSourceAggregationBuilder<ValuesSource.Numeric, DateHistogramAggregationBuilder> implements MultiBucketAggregationBuilder { + public static final String NAME = "date_histogram"; private static DateMathParser EPOCH_MILLIS_PARSER = DateFormatter.forPattern("epoch_millis").toDateMathParser(); - public static final Map<String, DateTimeUnit> DATE_FIELD_UNITS; + public static final Map<String, Rounding.DateTimeUnit> DATE_FIELD_UNITS; static { - Map<String, DateTimeUnit> dateFieldUnits = new HashMap<>(); - dateFieldUnits.put("year", DateTimeUnit.YEAR_OF_CENTURY); - dateFieldUnits.put("1y", DateTimeUnit.YEAR_OF_CENTURY); - dateFieldUnits.put("quarter", DateTimeUnit.QUARTER); - dateFieldUnits.put("1q", DateTimeUnit.QUARTER); - dateFieldUnits.put("month", DateTimeUnit.MONTH_OF_YEAR); - dateFieldUnits.put("1M", DateTimeUnit.MONTH_OF_YEAR); - dateFieldUnits.put("week", DateTimeUnit.WEEK_OF_WEEKYEAR); - dateFieldUnits.put("1w", DateTimeUnit.WEEK_OF_WEEKYEAR); - dateFieldUnits.put("day", DateTimeUnit.DAY_OF_MONTH); - dateFieldUnits.put("1d", DateTimeUnit.DAY_OF_MONTH); - dateFieldUnits.put("hour", DateTimeUnit.HOUR_OF_DAY); - dateFieldUnits.put("1h", DateTimeUnit.HOUR_OF_DAY); - dateFieldUnits.put("minute", DateTimeUnit.MINUTES_OF_HOUR); - dateFieldUnits.put("1m", DateTimeUnit.MINUTES_OF_HOUR); - dateFieldUnits.put("second", DateTimeUnit.SECOND_OF_MINUTE); - dateFieldUnits.put("1s", DateTimeUnit.SECOND_OF_MINUTE); + Map<String, Rounding.DateTimeUnit> dateFieldUnits = new HashMap<>(); + dateFieldUnits.put("year", Rounding.DateTimeUnit.YEAR_OF_CENTURY); + dateFieldUnits.put("1y", Rounding.DateTimeUnit.YEAR_OF_CENTURY); + dateFieldUnits.put("quarter", Rounding.DateTimeUnit.QUARTER_OF_YEAR); + dateFieldUnits.put("1q", Rounding.DateTimeUnit.QUARTER_OF_YEAR); + dateFieldUnits.put("month", 
Rounding.DateTimeUnit.MONTH_OF_YEAR); + dateFieldUnits.put("1M", Rounding.DateTimeUnit.MONTH_OF_YEAR); + dateFieldUnits.put("week", Rounding.DateTimeUnit.WEEK_OF_WEEKYEAR); + dateFieldUnits.put("1w", Rounding.DateTimeUnit.WEEK_OF_WEEKYEAR); + dateFieldUnits.put("day", Rounding.DateTimeUnit.DAY_OF_MONTH); + dateFieldUnits.put("1d", Rounding.DateTimeUnit.DAY_OF_MONTH); + dateFieldUnits.put("hour", Rounding.DateTimeUnit.HOUR_OF_DAY); + dateFieldUnits.put("1h", Rounding.DateTimeUnit.HOUR_OF_DAY); + dateFieldUnits.put("minute", Rounding.DateTimeUnit.MINUTES_OF_HOUR); + dateFieldUnits.put("1m", Rounding.DateTimeUnit.MINUTES_OF_HOUR); + dateFieldUnits.put("second", Rounding.DateTimeUnit.SECOND_OF_MINUTE); + dateFieldUnits.put("1s", Rounding.DateTimeUnit.SECOND_OF_MINUTE); DATE_FIELD_UNITS = unmodifiableMap(dateFieldUnits); } @@ -369,11 +371,11 @@ public String getType() { * coordinating node in order to generate missing buckets, which may cross a transition * even though data on the shards doesn't. */ - DateTimeZone rewriteTimeZone(QueryShardContext context) throws IOException { - final DateTimeZone tz = timeZone(); + ZoneId rewriteTimeZone(QueryShardContext context) throws IOException { + final ZoneId tz = timeZone(); if (field() != null && tz != null && - tz.isFixed() == false && + tz.getRules().isFixedOffset() == false && field() != null && script() == null) { final MappedFieldType ft = context.fieldMapper(field()); @@ -391,16 +393,29 @@ DateTimeZone rewriteTimeZone(QueryShardContext context) throws IOException { } if (anyInstant != null) { - final long prevTransition = tz.previousTransition(anyInstant); - final long nextTransition = tz.nextTransition(anyInstant); + Instant instant = Instant.ofEpochMilli(anyInstant); + ZoneOffsetTransition prevOffsetTransition = tz.getRules().previousTransition(instant); + final long prevTransition; + if (prevOffsetTransition != null) { + prevTransition = prevOffsetTransition.getInstant().toEpochMilli(); + } else { + prevTransition = instant.toEpochMilli(); + } + ZoneOffsetTransition nextOffsetTransition = tz.getRules().nextTransition(instant); + final long nextTransition; + if (nextOffsetTransition != null) { + nextTransition = nextOffsetTransition.getInstant().toEpochMilli(); + } else { + nextTransition = instant.toEpochMilli(); + } // We need all not only values but also rounded values to be within // [prevTransition, nextTransition]. final long low; - DateTimeUnit intervalAsUnit = getIntervalAsDateTimeUnit(); + Rounding.DateTimeUnit intervalAsUnit = getIntervalAsDateTimeUnit(); if (intervalAsUnit != null) { - final DateTimeField dateTimeField = intervalAsUnit.field(tz); - low = dateTimeField.roundCeiling(prevTransition); + Rounding rounding = Rounding.builder(intervalAsUnit).timeZone(timeZone()).build(); + low = rounding.nextRoundingValue(prevTransition); } else { final TimeValue intervalAsMillis = getIntervalAsTimeValue(); low = Math.addExact(prevTransition, intervalAsMillis.millis()); @@ -408,12 +423,12 @@ DateTimeZone rewriteTimeZone(QueryShardContext context) throws IOException { // rounding rounds down, so 'nextTransition' is a good upper bound final long high = nextTransition; - if (ft.isFieldWithinQuery(reader, low, high, true, false, DateTimeZone.UTC, EPOCH_MILLIS_PARSER, + if (ft.isFieldWithinQuery(reader, low, high, true, false, ZoneOffset.UTC, EPOCH_MILLIS_PARSER, context) == Relation.WITHIN) { // All values in this reader have the same offset despite daylight saving times. 
// This is very common for location-based timezones such as Europe/Paris in // combination with time-based indices. - return DateTimeZone.forOffsetMillis(tz.getOffset(anyInstant)); + return ZoneOffset.ofTotalSeconds(tz.getRules().getOffset(instant).getTotalSeconds()); } } } @@ -424,9 +439,9 @@ DateTimeZone rewriteTimeZone(QueryShardContext context) throws IOException { @Override protected ValuesSourceAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { - final DateTimeZone tz = timeZone(); + final ZoneId tz = timeZone(); final Rounding rounding = createRounding(tz); - final DateTimeZone rewrittenTimeZone = rewriteTimeZone(context.getQueryShardContext()); + final ZoneId rewrittenTimeZone = rewriteTimeZone(context.getQueryShardContext()); final Rounding shardRounding; if (tz == rewrittenTimeZone) { shardRounding = rounding; @@ -447,7 +462,7 @@ DateTimeZone rewriteTimeZone(QueryShardContext context) throws IOException { * {@code null} then it means that the interval is expressed as a fixed * {@link TimeValue} and may be accessed via * {@link #getIntervalAsTimeValue()}. */ - private DateTimeUnit getIntervalAsDateTimeUnit() { + private Rounding.DateTimeUnit getIntervalAsDateTimeUnit() { if (dateHistogramInterval != null) { return DATE_FIELD_UNITS.get(dateHistogramInterval.toString()); } @@ -466,9 +481,9 @@ private TimeValue getIntervalAsTimeValue() { } } - private Rounding createRounding(DateTimeZone timeZone) { + private Rounding createRounding(ZoneId timeZone) { Rounding.Builder tzRoundingBuilder; - DateTimeUnit intervalAsUnit = getIntervalAsDateTimeUnit(); + Rounding.DateTimeUnit intervalAsUnit = getIntervalAsDateTimeUnit(); if (intervalAsUnit != null) { tzRoundingBuilder = Rounding.builder(intervalAsUnit); } else { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index 735a6717210a5..0c7a91505ae88 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -23,8 +23,8 @@ import org.apache.lucene.search.ScoreMode; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Rounding; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.rounding.Rounding; import org.elasticsearch.common.util.LongHash; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.Aggregator; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java index c7ad6de7e0d72..8c025eb34eeb3 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java @@ -19,7 +19,7 @@ package org.elasticsearch.search.aggregations.bucket.histogram; -import org.elasticsearch.common.rounding.Rounding; +import org.elasticsearch.common.Rounding; import org.elasticsearch.search.aggregations.Aggregator; import 
org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactory; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBounds.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBounds.java index 4cecfeff83381..b0dfbb9d66e9d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBounds.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBounds.java @@ -21,10 +21,10 @@ import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.rounding.Rounding; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentFragment; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java index f2e450942c3ad..63d08f5e832ac 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogram.java @@ -19,9 +19,9 @@ package org.elasticsearch.search.aggregations.bucket.histogram; import org.apache.lucene.util.PriorityQueue; +import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.rounding.Rounding; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.Aggregations; @@ -32,10 +32,10 @@ import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder.RoundingInfo; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.Instant; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -108,7 +108,7 @@ public String getKeyAsString() { @Override public Object getKey() { - return new DateTime(key, DateTimeZone.UTC); + return Instant.ofEpochMilli(key).atZone(ZoneOffset.UTC); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java index 496f8efc60ccf..2fa7f15a703ec 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java @@ -20,9 +20,9 @@ import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.PriorityQueue; +import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; import 
org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.rounding.Rounding; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.Aggregations; @@ -34,10 +34,10 @@ import org.elasticsearch.search.aggregations.KeyComparable; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.Instant; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; @@ -112,7 +112,7 @@ public String getKeyAsString() { @Override public Object getKey() { - return new DateTime(key, DateTimeZone.UTC); + return Instant.ofEpochMilli(key).atZone(ZoneOffset.UTC); } @Override @@ -185,13 +185,13 @@ static class EmptyBucketInfo { } EmptyBucketInfo(StreamInput in) throws IOException { - rounding = Rounding.Streams.read(in); + rounding = Rounding.read(in); subAggregations = InternalAggregations.readAggregations(in); bounds = in.readOptionalWriteable(ExtendedBounds::new); } void writeTo(StreamOutput out) throws IOException { - Rounding.Streams.write(rounding, out); + rounding.writeTo(out); subAggregations.writeTo(out); out.writeOptionalWriteable(bounds); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedAutoDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedAutoDateHistogram.java index c9ff1389f8ad3..66a29b4e05073 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedAutoDateHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedAutoDateHistogram.java @@ -24,10 +24,10 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.Instant; +import java.time.ZoneOffset; import java.util.List; public class ParsedAutoDateHistogram extends ParsedMultiBucketAggregation implements Histogram { @@ -83,7 +83,7 @@ public static class ParsedBucket extends ParsedMultiBucketAggregation.ParsedBuck @Override public Object getKey() { if (key != null) { - return new DateTime(key, DateTimeZone.UTC); + return Instant.ofEpochMilli(key).atZone(ZoneOffset.UTC); } return null; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedDateHistogram.java index ace0cb59907a8..1cf43a53ed26c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedDateHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ParsedDateHistogram.java @@ -23,10 +23,10 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.Instant; +import java.time.ZoneOffset; import java.util.List; public class 
ParsedDateHistogram extends ParsedMultiBucketAggregation implements Histogram { @@ -62,7 +62,7 @@ public static class ParsedBucket extends ParsedMultiBucketAggregation.ParsedBuck @Override public Object getKey() { if (key != null) { - return new DateTime(key, DateTimeZone.UTC); + return Instant.ofEpochMilli(key).atZone(ZoneOffset.UTC); } return null; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregationBuilder.java index b5bdba85b78ef..2b5e92ddcb3f9 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregationBuilder.java @@ -30,9 +30,9 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; import org.elasticsearch.search.internal.SearchContext; -import org.joda.time.DateTime; import java.io.IOException; +import java.time.ZonedDateTime; import java.util.Map; public class DateRangeAggregationBuilder extends AbstractRangeBuilder { @@ -224,24 +224,24 @@ public DateRangeAggregationBuilder addUnboundedFrom(double from) { * @param to * the upper bound on the dates, exclusive */ - public DateRangeAggregationBuilder addRange(String key, DateTime from, DateTime to) { + public DateRangeAggregationBuilder addRange(String key, ZonedDateTime from, ZonedDateTime to) { addRange(new RangeAggregator.Range(key, convertDateTime(from), convertDateTime(to))); return this; } - private static Double convertDateTime(DateTime dateTime) { + private static Double convertDateTime(ZonedDateTime dateTime) { if (dateTime == null) { return null; } else { - return (double) dateTime.getMillis(); + return (double) dateTime.toInstant().toEpochMilli(); } } /** - * Same as {@link #addRange(String, DateTime, DateTime)} but the key will be + * Same as {@link #addRange(String, ZonedDateTime, ZonedDateTime)} but the key will be * automatically generated based on from and to. */ - public DateRangeAggregationBuilder addRange(DateTime from, DateTime to) { + public DateRangeAggregationBuilder addRange(ZonedDateTime from, ZonedDateTime to) { return addRange(null, from, to); } @@ -253,16 +253,16 @@ public DateRangeAggregationBuilder addRange(DateTime from, DateTime to) { * @param to * the upper bound on the dates, exclusive */ - public DateRangeAggregationBuilder addUnboundedTo(String key, DateTime to) { + public DateRangeAggregationBuilder addUnboundedTo(String key, ZonedDateTime to) { addRange(new RangeAggregator.Range(key, null, convertDateTime(to))); return this; } /** - * Same as {@link #addUnboundedTo(String, DateTime)} but the key will be + * Same as {@link #addUnboundedTo(String, ZonedDateTime)} but the key will be * computed automatically. 
*/ - public DateRangeAggregationBuilder addUnboundedTo(DateTime to) { + public DateRangeAggregationBuilder addUnboundedTo(ZonedDateTime to) { return addUnboundedTo(null, to); } @@ -274,16 +274,16 @@ public DateRangeAggregationBuilder addUnboundedTo(DateTime to) { * @param from * the lower bound on the distances, inclusive */ - public DateRangeAggregationBuilder addUnboundedFrom(String key, DateTime from) { + public DateRangeAggregationBuilder addUnboundedFrom(String key, ZonedDateTime from) { addRange(new RangeAggregator.Range(key, convertDateTime(from), null)); return this; } /** - * Same as {@link #addUnboundedFrom(String, DateTime)} but the key will be + * Same as {@link #addUnboundedFrom(String, ZonedDateTime)} but the key will be * computed automatically. */ - public DateRangeAggregationBuilder addUnboundedFrom(DateTime from) { + public DateRangeAggregationBuilder addUnboundedFrom(ZonedDateTime from) { return addUnboundedFrom(null, from); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalDateRange.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalDateRange.java index 408c1325b85c9..a354aaeadbac0 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalDateRange.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalDateRange.java @@ -24,10 +24,10 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.support.ValueType; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.Instant; +import java.time.ZoneOffset; import java.util.List; import java.util.Map; @@ -48,12 +48,14 @@ public Bucket(String key, double from, double to, long docCount, InternalAggrega @Override public Object getFrom() { - return Double.isInfinite(((Number) from).doubleValue()) ? null : new DateTime(((Number) from).longValue(), DateTimeZone.UTC); + return Double.isInfinite(((Number) from).doubleValue()) ? null : + Instant.ofEpochMilli(((Number) from).longValue()).atZone(ZoneOffset.UTC); } @Override public Object getTo() { - return Double.isInfinite(((Number) to).doubleValue()) ? null : new DateTime(((Number) to).longValue(), DateTimeZone.UTC); + return Double.isInfinite(((Number) to).doubleValue()) ? 
null : + Instant.ofEpochMilli(((Number) to).longValue()).atZone(ZoneOffset.UTC); } private Double internalGetFrom() { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ParsedDateRange.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ParsedDateRange.java index 68adc41d23765..d4504e245541b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ParsedDateRange.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ParsedDateRange.java @@ -21,10 +21,11 @@ import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentParser; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.Instant; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; public class ParsedDateRange extends ParsedRange { @@ -59,11 +60,11 @@ public Object getTo() { return doubleAsDateTime(to); } - private static DateTime doubleAsDateTime(Double d) { + private static ZonedDateTime doubleAsDateTime(Double d) { if (d == null || Double.isInfinite(d)) { return null; } - return new DateTime(d.longValue(), DateTimeZone.UTC); + return Instant.ofEpochMilli(d.longValue()).atZone(ZoneOffset.UTC); } static ParsedBucket fromXContent(final XContentParser parser, final boolean keyed) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/DerivativePipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/DerivativePipelineAggregationBuilder.java index b8785d0bf7045..68ec9085df52a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/DerivativePipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/DerivativePipelineAggregationBuilder.java @@ -21,9 +21,9 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.rounding.DateTimeUnit; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -34,7 +34,6 @@ import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy; -import org.joda.time.DateTimeZone; import java.io.IOException; import java.util.ArrayList; @@ -139,9 +138,9 @@ protected PipelineAggregator createInternal(Map metaData) throws } Long xAxisUnits = null; if (units != null) { - DateTimeUnit dateTimeUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(units); + Rounding.DateTimeUnit dateTimeUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(units); if (dateTimeUnit != null) { - xAxisUnits = dateTimeUnit.field(DateTimeZone.UTC).getDurationField().getUnitMillis(); + xAxisUnits = dateTimeUnit.getField().getBaseUnit().getDuration().toMillis(); } else { TimeValue timeValue = TimeValue.parseTimeValue(units, null, getClass().getSimpleName() + ".unit"); if (timeValue != null) { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceFieldConfig.java 
b/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceFieldConfig.java index fbc3081758f96..de112c427a751 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceFieldConfig.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceFieldConfig.java @@ -19,19 +19,22 @@ package org.elasticsearch.search.aggregations.support; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.script.Script; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; +import java.time.ZoneOffset; import java.util.Objects; import java.util.function.BiFunction; @@ -39,7 +42,7 @@ public class MultiValuesSourceFieldConfig implements Writeable, ToXContentObject private String fieldName; private Object missing; private Script script; - private DateTimeZone timeZone; + private ZoneId timeZone; private static final String NAME = "field_config"; @@ -62,16 +65,16 @@ public class MultiValuesSourceFieldConfig implements Writeable, ToXContentObject if (timezoneAware) { parser.declareField(MultiValuesSourceFieldConfig.Builder::setTimeZone, p -> { if (p.currentToken() == XContentParser.Token.VALUE_STRING) { - return DateTimeZone.forID(p.text()); + return ZoneId.of(p.text()); } else { - return DateTimeZone.forOffsetHours(p.intValue()); + return ZoneOffset.ofHours(p.intValue()); } }, ParseField.CommonFields.TIME_ZONE, ObjectParser.ValueType.LONG); } return parser; }; - private MultiValuesSourceFieldConfig(String fieldName, Object missing, Script script, DateTimeZone timeZone) { + private MultiValuesSourceFieldConfig(String fieldName, Object missing, Script script, ZoneId timeZone) { this.fieldName = fieldName; this.missing = missing; this.script = script; @@ -82,7 +85,11 @@ public MultiValuesSourceFieldConfig(StreamInput in) throws IOException { this.fieldName = in.readString(); this.missing = in.readGenericValue(); this.script = in.readOptionalWriteable(Script::new); - this.timeZone = in.readOptionalTimeZone(); + if (in.getVersion().before(Version.V_7_0_0)) { + this.timeZone = DateUtils.dateTimeZoneToZoneId(in.readOptionalTimeZone()); + } else { + this.timeZone = in.readOptionalZoneId(); + } } public Object getMissing() { @@ -93,7 +100,7 @@ public Script getScript() { return script; } - public DateTimeZone getTimeZone() { + public ZoneId getTimeZone() { return timeZone; } @@ -106,7 +113,11 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(fieldName); out.writeGenericValue(missing); out.writeOptionalWriteable(script); - out.writeOptionalTimeZone(timeZone); + if (out.getVersion().before(Version.V_7_0_0)) { + out.writeOptionalTimeZone(DateUtils.zoneIdToDateTimeZone(timeZone)); + } else { + out.writeOptionalZoneId(timeZone); + } } @Override @@ -122,7 +133,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(ParseField.CommonFields.FIELD.getPreferredName(), fieldName); } if (timeZone != null) { 
- builder.field(ParseField.CommonFields.TIME_ZONE.getPreferredName(), timeZone.getID()); + builder.field(ParseField.CommonFields.TIME_ZONE.getPreferredName(), timeZone.getId()); } builder.endObject(); return builder; @@ -153,7 +164,7 @@ public static class Builder { private String fieldName; private Object missing = null; private Script script = null; - private DateTimeZone timeZone = null; + private ZoneId timeZone = null; public String getFieldName() { return fieldName; @@ -182,11 +193,11 @@ public Builder setScript(Script script) { return this; } - public DateTimeZone getTimeZone() { + public ZoneId getTimeZone() { return timeZone; } - public Builder setTimeZone(DateTimeZone timeZone) { + public Builder setTimeZone(ZoneId timeZone) { this.timeZone = timeZone; return this; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java index 25a90e581f00c..3cbd11288bffc 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValueType.java @@ -28,9 +28,9 @@ import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.search.DocValueFormat; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneOffset; public enum ValueType implements Writeable { @@ -42,7 +42,7 @@ public enum ValueType implements Writeable { DOUBLE((byte) 3, "float|double", "double", ValuesSourceType.NUMERIC, IndexNumericFieldData.class, DocValueFormat.RAW), NUMBER((byte) 4, "number", "number", ValuesSourceType.NUMERIC, IndexNumericFieldData.class, DocValueFormat.RAW), DATE((byte) 5, "date", "date", ValuesSourceType.NUMERIC, IndexNumericFieldData.class, - new DocValueFormat.DateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, DateTimeZone.UTC)), + new DocValueFormat.DateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER, ZoneOffset.UTC)), IP((byte) 6, "ip", "ip", ValuesSourceType.BYTES, IndexFieldData.class, DocValueFormat.IP), // TODO: what is the difference between "number" and "numeric"? 
NUMERIC((byte) 7, "numeric", "numeric", ValuesSourceType.NUMERIC, IndexNumericFieldData.class, DocValueFormat.RAW), diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java index 040cc1b542f07..d3abe6f3169ee 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregationBuilder.java @@ -18,8 +18,10 @@ */ package org.elasticsearch.search.aggregations.support; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.script.Script; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; @@ -28,9 +30,9 @@ import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.internal.SearchContext; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; import java.util.Map; import java.util.Objects; @@ -81,7 +83,7 @@ public final AB subAggregations(Builder subFactories) { private ValueType valueType = null; private String format = null; private Object missing = null; - private DateTimeZone timeZone = null; + private ZoneId timeZone = null; protected ValuesSourceConfig config; protected ValuesSourceAggregationBuilder(String name, ValuesSourceType valuesSourceType, ValueType targetValueType) { @@ -144,8 +146,10 @@ private void read(StreamInput in) throws IOException { } format = in.readOptionalString(); missing = in.readGenericValue(); - if (in.readBoolean()) { - timeZone = DateTimeZone.forID(in.readString()); + if (in.getVersion().before(Version.V_7_0_0)) { + timeZone = DateUtils.dateTimeZoneToZoneId(in.readOptionalTimeZone()); + } else { + timeZone = in.readOptionalZoneId(); } } @@ -167,10 +171,10 @@ protected final void doWriteTo(StreamOutput out) throws IOException { } out.writeOptionalString(format); out.writeGenericValue(missing); - boolean hasTimeZone = timeZone != null; - out.writeBoolean(hasTimeZone); - if (hasTimeZone) { - out.writeString(timeZone.getID()); + if (out.getVersion().before(Version.V_7_0_0)) { + out.writeOptionalTimeZone(DateUtils.zoneIdToDateTimeZone(timeZone)); + } else { + out.writeOptionalZoneId(timeZone); } innerWriteTo(out); } @@ -289,7 +293,7 @@ public Object missing() { * Sets the time zone to use for this aggregation */ @SuppressWarnings("unchecked") - public AB timeZone(DateTimeZone timeZone) { + public AB timeZone(ZoneId timeZone) { if (timeZone == null) { throw new IllegalArgumentException("[timeZone] must not be null: [" + name + "]"); } @@ -300,7 +304,7 @@ public AB timeZone(DateTimeZone timeZone) { /** * Gets the time zone to use for this aggregation */ - public DateTimeZone timeZone() { + public ZoneId timeZone() { return timeZone; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java index 39c53f39c7dac..82baa04fe8f1a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceConfig.java @@ -32,7 +32,9 @@ import org.elasticsearch.script.Script; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.AggregationExecutionException; -import org.joda.time.DateTimeZone; + +import java.time.ZoneId; +import java.time.ZoneOffset; /** * A configuration that tells aggregations how to retrieve data from the index @@ -48,7 +50,7 @@ public static ValuesSourceConfig resolve( ValueType valueType, String field, Script script, Object missing, - DateTimeZone timeZone, + ZoneId timeZone, String format) { if (field == null) { @@ -121,7 +123,7 @@ private static AggregationScript.LeafFactory createScript(Script script, QuerySh } } - private static DocValueFormat resolveFormat(@Nullable String format, @Nullable ValueType valueType, @Nullable DateTimeZone tz) { + private static DocValueFormat resolveFormat(@Nullable String format, @Nullable ValueType valueType, @Nullable ZoneId tz) { if (valueType == null) { return DocValueFormat.RAW; // we can't figure it out } @@ -130,7 +132,7 @@ private static DocValueFormat resolveFormat(@Nullable String format, @Nullable V valueFormat = new DocValueFormat.Decimal(format); } if (valueFormat instanceof DocValueFormat.DateTime && format != null) { - valueFormat = new DocValueFormat.DateTime(DateFormatter.forPattern(format), tz != null ? tz : DateTimeZone.UTC); + valueFormat = new DocValueFormat.DateTime(DateFormatter.forPattern(format), tz != null ? tz : ZoneOffset.UTC); } return valueFormat; } @@ -142,7 +144,7 @@ private static DocValueFormat resolveFormat(@Nullable String format, @Nullable V private boolean unmapped = false; private DocValueFormat format = DocValueFormat.RAW; private Object missing; - private DateTimeZone timeZone; + private ZoneId timeZone; public ValuesSourceConfig(ValuesSourceType valueSourceType) { this.valueSourceType = valueSourceType; @@ -206,12 +208,12 @@ public Object missing() { return this.missing; } - public ValuesSourceConfig timezone(final DateTimeZone timeZone) { - this.timeZone= timeZone; + public ValuesSourceConfig timezone(final ZoneId timeZone) { + this.timeZone = timeZone; return this; } - public DateTimeZone timezone() { + public ZoneId timezone() { return this.timeZone; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParserHelper.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParserHelper.java index fc0a2f3a9fefe..24bdffaa3fa89 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParserHelper.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParserHelper.java @@ -25,7 +25,9 @@ import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.script.Script; -import org.joda.time.DateTimeZone; + +import java.time.ZoneId; +import java.time.ZoneOffset; public final class ValuesSourceParserHelper { @@ -91,9 +93,9 @@ private static void declareFields( if (timezoneAware) { objectParser.declareField(ValuesSourceAggregationBuilder::timeZone, p -> { if (p.currentToken() == XContentParser.Token.VALUE_STRING) { - return DateTimeZone.forID(p.text()); + return ZoneId.of(p.text()); } else { - return DateTimeZone.forOffsetHours(p.intValue()); + return ZoneOffset.ofHours(p.intValue()); } }, ParseField.CommonFields.TIME_ZONE, ObjectParser.ValueType.LONG); } diff --git 
a/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java index c1692f606178e..cbd4ff659e599 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java @@ -28,7 +28,6 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.time.DateFormatter; -import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContent; @@ -52,7 +51,7 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent, public static final String CONTEXT_MODE_PARAM = "context_mode"; public static final String CONTEXT_MODE_SNAPSHOT = "SNAPSHOT"; - private static final DateFormatter DATE_TIME_FORMATTER = DateFormatters.forPattern("strictDateOptionalTime"); + private static final DateFormatter DATE_TIME_FORMATTER = DateFormatter.forPattern("strictDateOptionalTime"); private static final String SNAPSHOT = "snapshot"; private static final String UUID = "uuid"; private static final String INDICES = "indices"; diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java index 6b8d1ab4fafb7..9f6f19596d080 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java @@ -25,7 +25,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.time.DateFormatters; +import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -276,7 +276,7 @@ public void testRolloverOnExistingIndex() throws Exception { public void testRolloverWithDateMath() { ZonedDateTime now = ZonedDateTime.now(ZoneOffset.UTC); assumeTrue("only works on the same day", now.plusMinutes(5).getDayOfYear() == now.getDayOfYear()); - String index = "test-" + DateFormatters.forPattern("YYYY.MM.dd").format(now) + "-1"; + String index = "test-" + DateFormatter.forPattern("YYYY.MM.dd").format(now) + "-1"; String dateMathExp = "<test-{now/d}-1>"; assertAcked(prepareCreate(dateMathExp).addAlias(new Alias("test_alias")).get()); ensureGreen(index); @@ -290,14 +290,14 @@ public void testRolloverWithDateMath() { ensureGreen(index); RolloverResponse response = client().admin().indices().prepareRolloverIndex("test_alias").get(); assertThat(response.getOldIndex(), equalTo(index)); - assertThat(response.getNewIndex(), equalTo("test-" + DateFormatters.forPattern("YYYY.MM").format(now) + "-000002")); + assertThat(response.getNewIndex(), equalTo("test-" + DateFormatter.forPattern("YYYY.MM").format(now) + "-000002")); assertThat(response.isDryRun(), equalTo(false)); assertThat(response.isRolledOver(), equalTo(true)); assertThat(response.getConditionStatus().size(), equalTo(0)); response = client().admin().indices().prepareRolloverIndex("test_alias").get(); - assertThat(response.getOldIndex(), equalTo("test-" + DateFormatters.forPattern("YYYY.MM").format(now) + "-000002")); -
assertThat(response.getNewIndex(), equalTo("test-" + DateFormatters.forPattern("YYYY.MM").format(now) + "-000003")); + assertThat(response.getOldIndex(), equalTo("test-" + DateFormatter.forPattern("YYYY.MM").format(now) + "-000002")); + assertThat(response.getNewIndex(), equalTo("test-" + DateFormatter.forPattern("YYYY.MM").format(now) + "-000003")); assertThat(response.isDryRun(), equalTo(false)); assertThat(response.isRolledOver(), equalTo(true)); assertThat(response.getConditionStatus().size(), equalTo(0)); @@ -310,8 +310,8 @@ public void testRolloverWithDateMath() { IndexMetaData.SETTING_INDEX_PROVIDED_NAME)); response = client().admin().indices().prepareRolloverIndex("test_alias").setNewIndexName("<test-{now/d}-000004>").get(); - assertThat(response.getOldIndex(), equalTo("test-" + DateFormatters.forPattern("YYYY.MM").format(now) + "-000003")); - assertThat(response.getNewIndex(), equalTo("test-" + DateFormatters.forPattern("YYYY.MM.dd").format(now) + "-000004")); + assertThat(response.getOldIndex(), equalTo("test-" + DateFormatter.forPattern("YYYY.MM").format(now) + "-000003")); + assertThat(response.getNewIndex(), equalTo("test-" + DateFormatter.forPattern("YYYY.MM.dd").format(now) + "-000004")); assertThat(response.isDryRun(), equalTo(false)); assertThat(response.isRolledOver(), equalTo(true)); assertThat(response.getConditionStatus().size(), equalTo(0)); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java index 5c67e1bbe566c..2f52bd0d40aae 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DateMathExpressionResolverTests.java @@ -93,25 +93,25 @@ public void testExpression_MultiParts() throws Exception { @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37037") public void testExpression_CustomFormat() throws Exception { - List<String> results = expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{YYYY.MM.dd}}>")); + List<String> results = expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{yyyy.MM.dd}}>")); assertThat(results.size(), equalTo(1)); assertThat(results.get(0), - equalTo(".marvel-" + DateTimeFormat.forPattern("YYYY.MM.dd").print(new DateTime(context.getStartTime(), UTC)))); + equalTo(".marvel-" + DateTimeFormat.forPattern("yyyy.MM.dd").print(new DateTime(context.getStartTime(), UTC)))); } public void testExpression_EscapeStatic() throws Exception { List<String> result = expressionResolver.resolve(context, Arrays.asList("<.mar\\{v\\}el-{now/d}>")); assertThat(result.size(), equalTo(1)); assertThat(result.get(0), - equalTo(".mar{v}el-" + DateTimeFormat.forPattern("YYYY.MM.dd").print(new DateTime(context.getStartTime(), UTC)))); + equalTo(".mar{v}el-" + DateTimeFormat.forPattern("yyyy.MM.dd").print(new DateTime(context.getStartTime(), UTC)))); } @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37037") public void testExpression_EscapeDateFormat() throws Exception { - List<String> result = expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{'\\{year\\}'YYYY}}>")); + List<String> result = expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{'\\{year\\}'yyyy}}>")); assertThat(result.size(), equalTo(1)); assertThat(result.get(0), - equalTo(".marvel-" + DateTimeFormat.forPattern("'{year}'YYYY").print(new DateTime(context.getStartTime(), UTC)))); + equalTo(".marvel-" +
DateTimeFormat.forPattern("'{year}'yyyy").print(new DateTime(context.getStartTime(), UTC)))); } public void testExpression_MixedArray() throws Exception { @@ -150,10 +150,10 @@ public void testExpression_CustomTimeZoneInIndexName() throws Exception { now = DateTime.now(UTC).withHourOfDay(0).withMinuteOfHour(0).withSecondOfMinute(0); } Context context = new Context(this.context.getState(), this.context.getOptions(), now.getMillis()); - List<String> results = expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{YYYY.MM.dd|" + timeZone.getID() + "}}>")); + List<String> results = expressionResolver.resolve(context, Arrays.asList("<.marvel-{now/d{yyyy.MM.dd|" + timeZone.getID() + "}}>")); assertThat(results.size(), equalTo(1)); logger.info("timezone: [{}], now [{}], name: [{}]", timeZone, now, results.get(0)); - assertThat(results.get(0), equalTo(".marvel-" + DateTimeFormat.forPattern("YYYY.MM.dd").print(now.withZone(timeZone)))); + assertThat(results.get(0), equalTo(".marvel-" + DateTimeFormat.forPattern("yyyy.MM.dd").print(now.withZone(timeZone)))); } public void testExpressionInvalidUnescaped() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/common/RoundingTests.java b/server/src/test/java/org/elasticsearch/common/RoundingTests.java index 1664f67a44df9..9bc7c10abd8c8 100644 --- a/server/src/test/java/org/elasticsearch/common/RoundingTests.java +++ b/server/src/test/java/org/elasticsearch/common/RoundingTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.rounding.DateTimeUnit; +import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; @@ -317,7 +318,7 @@ public void testIntervalRounding_HalfDay_DST() { } /** - * randomized test on {@link org.elasticsearch.common.rounding.Rounding.TimeIntervalRounding} with random interval and time zone offsets + * randomized test on {@link org.elasticsearch.common.Rounding.TimeIntervalRounding} with random interval and time zone offsets */ public void testIntervalRoundingRandom() { for (int i = 0; i < 1000; i++) { @@ -728,7 +729,7 @@ private static long time(String time) { } private static long time(String time, ZoneId zone) { - TemporalAccessor accessor = DateFormatters.forPattern("date_optional_time").withZone(zone).parse(time); + TemporalAccessor accessor = DateFormatter.forPattern("date_optional_time").withZone(zone).parse(time); return DateFormatters.toZonedDateTime(accessor).toInstant().toEpochMilli(); } diff --git a/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java b/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java index b2370dadb604c..c7abea63be081 100644 --- a/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java +++ b/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java @@ -29,13 +29,11 @@ import java.time.ZoneOffset; import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; -import java.time.format.DateTimeParseException; import java.time.temporal.TemporalAccessor; import java.util.Locale; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.startsWith; public class JavaJodaTimeDuellingTests extends ESTestCase { @@ -64,11 +62,22 @@ public void testTimeZoneFormatting() { formatter3.parse("20181126T121212.123-0830"); } - public
void testCustomTimeFormats() { - assertSameDate("2010 12 06 11:05:15", "yyyy dd MM HH:mm:ss"); - assertSameDate("12/06", "dd/MM"); - assertSameDate("Nov 24 01:29:01 -0800", "MMM dd HH:mm:ss Z"); - } + // this test requires tests to run with -Djava.locale.providers=COMPAT in order to work +// public void testCustomTimeFormats() { +// assertSameDate("2010 12 06 11:05:15", "yyyy dd MM HH:mm:ss"); +// assertSameDate("12/06", "dd/MM"); +// assertSameDate("Nov 24 01:29:01 -0800", "MMM dd HH:mm:ss Z"); +// +// // also ensure that locale based dates are the same +// assertSameDate("Di., 05 Dez. 2000 02:55:00 -0800", "E, d MMM yyyy HH:mm:ss Z", LocaleUtils.parse("de")); +// assertSameDate("Mi., 06 Dez. 2000 02:55:00 -0800", "E, d MMM yyyy HH:mm:ss Z", LocaleUtils.parse("de")); +// assertSameDate("Do., 07 Dez. 2000 00:00:00 -0800", "E, d MMM yyyy HH:mm:ss Z", LocaleUtils.parse("de")); +// assertSameDate("Fr., 08 Dez. 2000 00:00:00 -0800", "E, d MMM yyyy HH:mm:ss Z", LocaleUtils.parse("de")); +// +// DateTime dateTimeNow = DateTime.now(DateTimeZone.UTC); +// ZonedDateTime javaTimeNow = Instant.ofEpochMilli(dateTimeNow.getMillis()).atZone(ZoneOffset.UTC); +// assertSamePrinterOutput("E, d MMM yyyy HH:mm:ss Z", LocaleUtils.parse("de"), javaTimeNow, dateTimeNow); +// } public void testDuellingFormatsValidParsing() { assertSameDate("1522332219", "epoch_second"); @@ -133,10 +142,6 @@ public void testDuellingFormatsValidParsing() { assertSameDate("2018-12-31T12:12:12.1", "date_hour_minute_second_millis"); assertSameDate("2018-12-31T12:12:12.1", "date_hour_minute_second_fraction"); - assertSameDate("10000", "date_optional_time"); - assertSameDate("10000T", "date_optional_time"); - assertSameDate("2018", "date_optional_time"); - assertSameDate("2018T", "date_optional_time"); assertSameDate("2018-05", "date_optional_time"); assertSameDate("2018-05-30", "date_optional_time"); assertSameDate("2018-05-30T20", "date_optional_time"); @@ -278,7 +283,7 @@ public void testDuellingFormatsValidParsing() { // joda comes up with a different exception message here, so we have to adapt assertJodaParseException("2012-W1-8", "week_date", "Cannot parse \"2012-W1-8\": Value 8 for dayOfWeek must be in the range [1,7]"); - assertJavaTimeParseException("2012-W1-8", "week_date", "Text '2012-W1-8' could not be parsed"); + assertJavaTimeParseException("2012-W1-8", "week_date"); assertSameDate("2012-W48-6T10:15:30.123Z", "week_date_time"); assertSameDate("2012-W48-6T10:15:30.123+0100", "week_date_time"); @@ -358,6 +363,7 @@ public void testDuelingStrictParsing() { assertParseException("2018-12-1", "strict_date_optional_time"); assertParseException("2018-1-31", "strict_date_optional_time"); assertParseException("10000-01-31", "strict_date_optional_time"); + assertSameDate("2010-01-05T02:00", "strict_date_optional_time"); assertSameDate("2018-12-31T10:15:30", "strict_date_optional_time"); assertSameDate("2018-12-31T10:15:30Z", "strict_date_optional_time"); assertSameDate("2018-12-31T10:15:30+0100", "strict_date_optional_time"); @@ -365,6 +371,7 @@ public void testDuelingStrictParsing() { assertParseException("2018-12-31T10:15:3", "strict_date_optional_time"); assertParseException("2018-12-31T10:5:30", "strict_date_optional_time"); assertParseException("2018-12-31T9:15:30", "strict_date_optional_time"); + assertSameDate("2015-01-04T00:00Z", "strict_date_optional_time"); assertSameDate("2018-12-31T10:15:30.123Z", "strict_date_time"); assertSameDate("2018-12-31T10:15:30.123+0100", "strict_date_time"); 
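The YYYY -> yyyy changes in DateMathExpressionResolverTests above are not cosmetic: java.time distinguishes 'Y' (week-based-year) from 'y' (year-of-era), and the two diverge for dates near the turn of the year. A plain-JDK illustration of the divergence (no Elasticsearch types involved):

import java.time.LocalDate;
import java.time.format.DateTimeFormatter;
import java.util.Locale;

public class WeekYearExample {
    public static void main(String[] args) {
        // 2014-12-29 is a Monday that ISO-8601 assigns to week 1 of week-based-year 2015
        LocalDate date = LocalDate.of(2014, 12, 29);
        System.out.println(DateTimeFormatter.ofPattern("YYYY.MM.dd", Locale.ROOT).format(date)); // 2015.12.29
        System.out.println(DateTimeFormatter.ofPattern("yyyy.MM.dd", Locale.ROOT).format(date)); // 2014.12.29
    }
}

Strict java.time parsing also refuses to resolve a date from a week-based-year combined with a calendar month and day, which is consistent with the @AwaitsFix markers on the expression tests above.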
assertSameDate("2018-12-31T10:15:30.123+01:00", "strict_date_time"); @@ -456,7 +463,7 @@ public void testDuelingStrictParsing() { // joda comes up with a different exception message here, so we have to adapt assertJodaParseException("2012-W01-8", "strict_week_date", "Cannot parse \"2012-W01-8\": Value 8 for dayOfWeek must be in the range [1,7]"); - assertJavaTimeParseException("2012-W01-8", "strict_week_date", "Text '2012-W01-8' could not be parsed"); + assertJavaTimeParseException("2012-W01-8", "strict_week_date"); assertSameDate("2012-W48-6T10:15:30.123Z", "strict_week_date_time"); assertSameDate("2012-W48-6T10:15:30.123+0100", "strict_week_date_time"); @@ -585,19 +592,55 @@ public void testSamePrinterOutput() { assertSamePrinterOutput("strictYear", javaDate, jodaDate); assertSamePrinterOutput("strictYearMonth", javaDate, jodaDate); assertSamePrinterOutput("strictYearMonthDay", javaDate, jodaDate); + assertSamePrinterOutput("strict_date_optional_time", javaDate, jodaDate); + assertSamePrinterOutput("epoch_millis", javaDate, jodaDate); + } + + public void testSamePrinterOutputWithTimeZone() { + String format = "strict_date_optional_time"; + String dateInput = "2017-02-01T08:02:00.000-01:00"; + DateFormatter javaFormatter = DateFormatter.forPattern(format); + TemporalAccessor javaDate = javaFormatter.parse(dateInput); + + DateFormatter jodaFormatter = Joda.forPattern(format); + DateTime dateTime = jodaFormatter.parseJoda(dateInput); + + String javaDateString = javaFormatter.withZone(ZoneOffset.ofHours(-1)).format(javaDate); + String jodaDateString = jodaFormatter.withZone(ZoneOffset.ofHours(-1)).formatJoda(dateTime); + String message = String.format(Locale.ROOT, "expected string representation to be equal for format [%s]: joda [%s], java [%s]", + format, jodaDateString, javaDateString); + assertThat(message, javaDateString, is(jodaDateString)); + } + + public void testDateFormatterWithLocale() { + Locale locale = randomLocale(random()); + String pattern = randomBoolean() ? 
"strict_date_optional_time||date_time" : "date_time||strict_date_optional_time"; + DateFormatter formatter = DateFormatter.forPattern(pattern).withLocale(locale); + assertThat(formatter.pattern(), is(pattern)); + assertThat(formatter.locale(), is(locale)); } public void testSeveralTimeFormats() { - DateFormatter jodaFormatter = DateFormatter.forPattern("year_month_day||ordinal_date"); - DateFormatter javaFormatter = DateFormatter.forPattern("8year_month_day||ordinal_date"); - assertSameDate("2018-12-12", "year_month_day||ordinal_date", jodaFormatter, javaFormatter); - assertSameDate("2018-128", "year_month_day||ordinal_date", jodaFormatter, javaFormatter); + { + String format = "year_month_day||ordinal_date"; + DateFormatter jodaFormatter = Joda.forPattern(format); + DateFormatter javaFormatter = DateFormatter.forPattern(format); + assertSameDate("2018-12-12", format, jodaFormatter, javaFormatter); + assertSameDate("2018-128", format, jodaFormatter, javaFormatter); + } + { + String format = "strictDateOptionalTime||dd-MM-yyyy"; + DateFormatter jodaFormatter = Joda.forPattern(format); + DateFormatter javaFormatter = DateFormatter.forPattern(format); + assertSameDate("31-01-2014", format, jodaFormatter, javaFormatter); + } } private void assertSamePrinterOutput(String format, ZonedDateTime javaDate, DateTime jodaDate) { assertThat(jodaDate.getMillis(), is(javaDate.toInstant().toEpochMilli())); - String javaTimeOut = DateFormatters.forPattern(format).format(javaDate); - String jodaTimeOut = DateFormatter.forPattern(format).formatJoda(jodaDate); + String javaTimeOut = DateFormatter.forPattern(format).format(javaDate); + String jodaTimeOut = Joda.forPattern(format).formatJoda(jodaDate); + if (JavaVersion.current().getVersion().get(0) == 8 && javaTimeOut.endsWith(".0") && (format.equals("epoch_second") || format.equals("epoch_millis"))) { // java 8 has a bug in DateTimeFormatter usage when printing dates that rely on isSupportedBy for fields, which is @@ -611,7 +654,7 @@ private void assertSamePrinterOutput(String format, ZonedDateTime javaDate, Date private void assertSameDate(String input, String format) { DateFormatter jodaFormatter = Joda.forPattern(format); - DateFormatter javaFormatter = DateFormatters.forPattern(format); + DateFormatter javaFormatter = DateFormatter.forPattern(format); assertSameDate(input, format, jodaFormatter, javaFormatter); } @@ -629,7 +672,7 @@ private void assertSameDate(String input, String format, DateFormatter jodaForma private void assertParseException(String input, String format) { assertJodaParseException(input, format, "Invalid format: \"" + input); - assertJavaTimeParseException(input, format, "Text '" + input + "' could not be parsed"); + assertJavaTimeParseException(input, format); } private void assertJodaParseException(String input, String format, String expectedMessage) { @@ -638,9 +681,10 @@ private void assertJodaParseException(String input, String format, String expect assertThat(e.getMessage(), containsString(expectedMessage)); } - private void assertJavaTimeParseException(String input, String format, String expectedMessage) { - DateFormatter javaTimeFormatter = DateFormatters.forPattern(format); - DateTimeParseException dateTimeParseException = expectThrows(DateTimeParseException.class, () -> javaTimeFormatter.parse(input)); - assertThat(dateTimeParseException.getMessage(), startsWith(expectedMessage)); + private void assertJavaTimeParseException(String input, String format) { + DateFormatter javaTimeFormatter = DateFormatter.forPattern(format); + 
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> javaTimeFormatter.parse(input)); + assertThat(e.getMessage(), containsString(input)); + assertThat(e.getMessage(), containsString(format)); } } diff --git a/server/src/test/java/org/elasticsearch/common/joda/JodaDateMathParserTests.java b/server/src/test/java/org/elasticsearch/common/joda/JodaDateMathParserTests.java index e502dfc6f963f..19aea3f19ba3b 100644 --- a/server/src/test/java/org/elasticsearch/common/joda/JodaDateMathParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/joda/JodaDateMathParserTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.test.ESTestCase; import org.joda.time.DateTimeZone; +import java.time.Instant; import java.time.ZoneId; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.LongSupplier; @@ -35,7 +36,7 @@ public class JodaDateMathParserTests extends ESTestCase { - DateFormatter formatter = DateFormatter.forPattern("dateOptionalTime||epoch_millis"); + DateFormatter formatter = Joda.forPattern("dateOptionalTime||epoch_millis"); DateMathParser parser = formatter.toDateMathParser(); void assertDateMathEquals(String toTest, String expected) { @@ -43,12 +44,12 @@ void assertDateMathEquals(String toTest, String expected) { } void assertDateMathEquals(String toTest, String expected, final long now, boolean roundUp, DateTimeZone timeZone) { - long gotMillis = parser.parse(toTest, () -> now, roundUp, timeZone); + long gotMillis = parser.parse(toTest, () -> now, roundUp, timeZone).toEpochMilli(); assertDateEquals(gotMillis, toTest, expected); } void assertDateEquals(long gotMillis, String original, String expected) { - long expectedMillis = parser.parse(expected, () -> 0); + long expectedMillis = parser.parse(expected, () -> 0).toEpochMilli(); if (gotMillis != expectedMillis) { fail("Date math not equal\n" + "Original : " + original + "\n" + @@ -147,7 +148,7 @@ public void testMultipleAdjustments() { public void testNow() { - final long now = parser.parse("2014-11-18T14:27:32", () -> 0, false, (ZoneId) null); + final long now = parser.parse("2014-11-18T14:27:32", () -> 0, false, (ZoneId) null).toEpochMilli(); assertDateMathEquals("now", "2014-11-18T14:27:32", now, false, null); assertDateMathEquals("now+M", "2014-12-18T14:27:32", now, false, null); @@ -164,10 +165,10 @@ public void testRoundingPreservesEpochAsBaseDate() { DateMathParser parser = formatter.toDateMathParser(); assertEquals( this.formatter.parseMillis("1970-01-01T04:52:20.000Z"), - parser.parse("04:52:20", () -> 0, false, (ZoneId) null)); + parser.parse("04:52:20", () -> 0, false, (ZoneId) null).toEpochMilli()); assertEquals( this.formatter.parseMillis("1970-01-01T04:52:20.999Z"), - parser.parse("04:52:20", () -> 0, true, (ZoneId) null)); + parser.parse("04:52:20", () -> 0, true, (ZoneId) null).toEpochMilli()); } // Implicit rounding happening when parts of the date are not specified @@ -185,9 +186,9 @@ public void testImplicitRounding() { assertDateMathEquals("2014-11-18T09:20", "2014-11-18T08:20:59.999Z", 0, true, DateTimeZone.forID("CET")); // implicit rounding with explicit timezone in the date format - DateFormatter formatter = DateFormatter.forPattern("yyyy-MM-ddZ"); + DateFormatter formatter = Joda.forPattern("yyyy-MM-ddZ"); DateMathParser parser = formatter.toDateMathParser(); - long time = parser.parse("2011-10-09+01:00", () -> 0, false, (ZoneId) null); + Instant time = parser.parse("2011-10-09+01:00", () -> 0, false, (ZoneId) null); 
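The JodaDateMathParserTests edits above track an API change in this series: DateMathParser.parse now returns java.time.Instant instead of epoch millis, so call sites convert explicitly with toEpochMilli(). A minimal usage sketch, assuming the Elasticsearch classpath (the pattern string and the fixed now-supplier are arbitrary choices for the example):

import java.time.Instant;
import java.time.ZoneId;

import org.elasticsearch.common.time.DateFormatter;
import org.elasticsearch.common.time.DateMathParser;

public class DateMathExample {
    public static void main(String[] args) {
        DateMathParser parser = DateFormatter.forPattern("strict_date_optional_time||epoch_millis").toDateMathParser();
        // anchor at 1970-01-01, add two days; "now" is pinned to epoch 0 for reproducibility
        Instant instant = parser.parse("1970-01-01||+2d", () -> 0L, false, (ZoneId) null);
        System.out.println(instant.toEpochMilli()); // 172800000
    }
}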
assertEquals(this.parser.parse("2011-10-09T00:00:00.000+01:00", () -> 0), time); time = parser.parse("2011-10-09+01:00", () -> 0, true, (ZoneId) null); assertEquals(this.parser.parse("2011-10-09T23:59:59.999+01:00", () -> 0), time); @@ -261,7 +262,7 @@ public void testTimestamps() { // also check other time units JodaDateMathParser parser = new JodaDateMathParser(Joda.forPattern("epoch_second")); - long datetime = parser.parse("1418248078", () -> 0); + long datetime = parser.parse("1418248078", () -> 0).toEpochMilli(); assertDateEquals(datetime, "1418248078", "2014-12-10T21:47:58.000"); // a timestamp before 10000 is a year diff --git a/server/src/test/java/org/elasticsearch/common/joda/JodaTests.java b/server/src/test/java/org/elasticsearch/common/joda/JodaTests.java index fde9d73fae892..003785b3c87b3 100644 --- a/server/src/test/java/org/elasticsearch/common/joda/JodaTests.java +++ b/server/src/test/java/org/elasticsearch/common/joda/JodaTests.java @@ -26,15 +26,18 @@ import java.time.ZoneOffset; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; + public class JodaTests extends ESTestCase { public void testBasicTTimePattern() { - DateFormatter formatter1 = DateFormatter.forPattern("basic_t_time"); + DateFormatter formatter1 = Joda.forPattern("basic_t_time"); assertEquals(formatter1.pattern(), "basic_t_time"); assertEquals(formatter1.zone(), ZoneOffset.UTC); - DateFormatter formatter2 = DateFormatter.forPattern("basicTTime"); + DateFormatter formatter2 = Joda.forPattern("basicTTime"); assertEquals(formatter2.pattern(), "basicTTime"); assertEquals(formatter2.zone(), ZoneOffset.UTC); @@ -42,9 +45,25 @@ public void testBasicTTimePattern() { assertEquals("T102030.040Z", formatter1.formatJoda(dt)); assertEquals("T102030.040Z", formatter1.formatJoda(dt)); - expectThrows(IllegalArgumentException.class, () -> DateFormatter.forPattern("basic_t_Time")); - expectThrows(IllegalArgumentException.class, () -> DateFormatter.forPattern("basic_T_Time")); - expectThrows(IllegalArgumentException.class, () -> DateFormatter.forPattern("basic_T_time")); + expectThrows(IllegalArgumentException.class, () -> Joda.forPattern("basic_t_Time")); + expectThrows(IllegalArgumentException.class, () -> Joda.forPattern("basic_T_Time")); + expectThrows(IllegalArgumentException.class, () -> Joda.forPattern("basic_T_time")); } + public void testEqualsAndHashcode() { + String format = randomFrom("yyyy/MM/dd HH:mm:ss", "basic_t_time"); + JodaDateFormatter first = Joda.forPattern(format); + JodaDateFormatter second = Joda.forPattern(format); + JodaDateFormatter third = Joda.forPattern(" HH:mm:ss, yyyy/MM/dd"); + + assertThat(first, is(second)); + assertThat(second, is(first)); + assertThat(first, is(not(third))); + assertThat(second, is(not(third))); + + assertThat(first.hashCode(), is(second.hashCode())); + assertThat(second.hashCode(), is(first.hashCode())); + assertThat(first.hashCode(), is(not(third.hashCode()))); + assertThat(second.hashCode(), is(not(third.hashCode()))); + } } diff --git a/server/src/test/java/org/elasticsearch/common/joda/SimpleJodaTests.java b/server/src/test/java/org/elasticsearch/common/joda/SimpleJodaTests.java deleted file mode 100644 index b6f1b1b650a6f..0000000000000 --- a/server/src/test/java/org/elasticsearch/common/joda/SimpleJodaTests.java +++ /dev/null @@ -1,800 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.joda; - -import org.elasticsearch.common.time.DateFormatter; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.index.mapper.RootObjectMapper; -import org.elasticsearch.test.ESTestCase; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; -import org.joda.time.LocalDateTime; -import org.joda.time.MutableDateTime; -import org.joda.time.format.DateTimeFormat; -import org.joda.time.format.DateTimeFormatter; -import org.joda.time.format.DateTimeFormatterBuilder; -import org.joda.time.format.DateTimeParser; -import org.joda.time.format.ISODateTimeFormat; - -import java.util.Date; -import java.util.Locale; - -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.endsWith; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.is; - -public class SimpleJodaTests extends ESTestCase { - public void testMultiParsers() { - DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder(); - DateTimeParser[] parsers = new DateTimeParser[3]; - parsers[0] = DateTimeFormat.forPattern("MM/dd/yyyy").withZone(DateTimeZone.UTC).getParser(); - parsers[1] = DateTimeFormat.forPattern("MM-dd-yyyy").withZone(DateTimeZone.UTC).getParser(); - parsers[2] = DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss").withZone(DateTimeZone.UTC).getParser(); - builder.append(DateTimeFormat.forPattern("MM/dd/yyyy").withZone(DateTimeZone.UTC).getPrinter(), parsers); - - DateTimeFormatter formatter = builder.toFormatter(); - - formatter.parseMillis("2009-11-15 14:12:12"); - } - - public void testIsoDateFormatDateTimeNoMillisUTC() { - DateTimeFormatter formatter = ISODateTimeFormat.dateTimeNoMillis().withZone(DateTimeZone.UTC); - long millis = formatter.parseMillis("1970-01-01T00:00:00Z"); - - assertThat(millis, equalTo(0L)); - } - - public void testUpperBound() { - MutableDateTime dateTime = new MutableDateTime(3000, 12, 31, 23, 59, 59, 999, DateTimeZone.UTC); - DateTimeFormatter formatter = ISODateTimeFormat.dateOptionalTimeParser().withZone(DateTimeZone.UTC); - - String value = "2000-01-01"; - int i = formatter.parseInto(dateTime, value, 0); - assertThat(i, equalTo(value.length())); - assertThat(dateTime.toString(), equalTo("2000-01-01T23:59:59.999Z")); - } - - public void testIsoDateFormatDateOptionalTimeUTC() { - DateTimeFormatter formatter = ISODateTimeFormat.dateOptionalTimeParser().withZone(DateTimeZone.UTC); - long millis = formatter.parseMillis("1970-01-01T00:00:00Z"); - assertThat(millis, equalTo(0L)); - millis = formatter.parseMillis("1970-01-01T00:00:00.001Z"); - assertThat(millis, equalTo(1L)); - millis = formatter.parseMillis("1970-01-01T00:00:00.1Z"); - assertThat(millis, equalTo(100L)); - millis = formatter.parseMillis("1970-01-01T00:00:00.1"); - assertThat(millis, equalTo(100L)); 
- millis = formatter.parseMillis("1970-01-01T00:00:00"); - assertThat(millis, equalTo(0L)); - millis = formatter.parseMillis("1970-01-01"); - assertThat(millis, equalTo(0L)); - - millis = formatter.parseMillis("1970"); - assertThat(millis, equalTo(0L)); - - try { - formatter.parseMillis("1970 kuku"); - fail("formatting should fail"); - } catch (IllegalArgumentException e) { - // all is well - } - - // test offset in format - millis = formatter.parseMillis("1970-01-01T00:00:00-02:00"); - assertThat(millis, equalTo(TimeValue.timeValueHours(2).millis())); - } - - public void testIsoVsCustom() { - DateTimeFormatter formatter = ISODateTimeFormat.dateOptionalTimeParser().withZone(DateTimeZone.UTC); - long millis = formatter.parseMillis("1970-01-01T00:00:00"); - assertThat(millis, equalTo(0L)); - - formatter = DateTimeFormat.forPattern("yyyy/MM/dd HH:mm:ss").withZone(DateTimeZone.UTC); - millis = formatter.parseMillis("1970/01/01 00:00:00"); - assertThat(millis, equalTo(0L)); - - DateFormatter formatter2 = DateFormatter.forPattern("yyyy/MM/dd HH:mm:ss"); - millis = formatter2.parseMillis("1970/01/01 00:00:00"); - assertThat(millis, equalTo(0L)); - } - - public void testWriteAndParse() { - DateTimeFormatter dateTimeWriter = ISODateTimeFormat.dateTime().withZone(DateTimeZone.UTC); - DateTimeFormatter formatter = ISODateTimeFormat.dateOptionalTimeParser().withZone(DateTimeZone.UTC); - Date date = new Date(); - assertThat(formatter.parseMillis(dateTimeWriter.print(date.getTime())), equalTo(date.getTime())); - } - - public void testSlashInFormat() { - DateFormatter formatter = DateFormatter.forPattern("MM/yyyy"); - formatter.parseMillis("01/2001"); - - DateFormatter formatter2 = DateFormatter.forPattern("yyyy/MM/dd HH:mm:ss"); - long millis = formatter2.parseMillis("1970/01/01 00:00:00"); - formatter2.formatMillis(millis); - - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - formatter2.parseMillis("1970/01/01")); - } - - public void testMultipleFormats() { - DateFormatter formatter = DateFormatter.forPattern("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd"); - long millis = formatter.parseMillis("1970/01/01 00:00:00"); - assertThat("1970/01/01 00:00:00", is(formatter.formatMillis(millis))); - } - - public void testMultipleDifferentFormats() { - DateFormatter formatter = DateFormatter.forPattern("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd"); - String input = "1970/01/01 00:00:00"; - long millis = formatter.parseMillis(input); - assertThat(input, is(formatter.formatMillis(millis))); - - DateFormatter.forPattern("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd||dateOptionalTime"); - DateFormatter.forPattern("dateOptionalTime||yyyy/MM/dd HH:mm:ss||yyyy/MM/dd"); - DateFormatter.forPattern("yyyy/MM/dd HH:mm:ss||dateOptionalTime||yyyy/MM/dd"); - DateFormatter.forPattern("date_time||date_time_no_millis"); - DateFormatter.forPattern(" date_time || date_time_no_millis"); - } - - public void testInvalidPatterns() { - expectInvalidPattern("does_not_exist_pattern", "Invalid format: [does_not_exist_pattern]: Illegal pattern component: o"); - expectInvalidPattern("OOOOO", "Invalid format: [OOOOO]: Illegal pattern component: OOOOO"); - expectInvalidPattern(null, "No date pattern provided"); - expectInvalidPattern("", "No date pattern provided"); - expectInvalidPattern(" ", "No date pattern provided"); - expectInvalidPattern("||date_time_no_millis", "No date pattern provided"); - expectInvalidPattern("date_time_no_millis||", "No date pattern provided"); - } - - private void expectInvalidPattern(String pattern, String 
errorMessage) { - try { - DateFormatter.forPattern(pattern); - fail("Pattern " + pattern + " should have thrown an exception but did not"); - } catch (IllegalArgumentException e) { - assertThat(e.getMessage(), containsString(errorMessage)); - } - } - - public void testRounding() { - long TIME = utcTimeInMillis("2009-02-03T01:01:01"); - MutableDateTime time = new MutableDateTime(DateTimeZone.UTC); - time.setMillis(TIME); - assertThat(time.monthOfYear().roundFloor().toString(), equalTo("2009-02-01T00:00:00.000Z")); - time.setMillis(TIME); - assertThat(time.hourOfDay().roundFloor().toString(), equalTo("2009-02-03T01:00:00.000Z")); - time.setMillis(TIME); - assertThat(time.dayOfMonth().roundFloor().toString(), equalTo("2009-02-03T00:00:00.000Z")); - } - - public void testRoundingSetOnTime() { - MutableDateTime time = new MutableDateTime(DateTimeZone.UTC); - time.setRounding(time.getChronology().monthOfYear(), MutableDateTime.ROUND_FLOOR); - time.setMillis(utcTimeInMillis("2009-02-03T01:01:01")); - assertThat(time.toString(), equalTo("2009-02-01T00:00:00.000Z")); - assertThat(time.getMillis(), equalTo(utcTimeInMillis("2009-02-01T00:00:00.000Z"))); - - time.setMillis(utcTimeInMillis("2009-05-03T01:01:01")); - assertThat(time.toString(), equalTo("2009-05-01T00:00:00.000Z")); - assertThat(time.getMillis(), equalTo(utcTimeInMillis("2009-05-01T00:00:00.000Z"))); - - time = new MutableDateTime(DateTimeZone.UTC); - time.setRounding(time.getChronology().dayOfMonth(), MutableDateTime.ROUND_FLOOR); - time.setMillis(utcTimeInMillis("2009-02-03T01:01:01")); - assertThat(time.toString(), equalTo("2009-02-03T00:00:00.000Z")); - assertThat(time.getMillis(), equalTo(utcTimeInMillis("2009-02-03T00:00:00.000Z"))); - - time.setMillis(utcTimeInMillis("2009-02-02T23:01:01")); - assertThat(time.toString(), equalTo("2009-02-02T00:00:00.000Z")); - assertThat(time.getMillis(), equalTo(utcTimeInMillis("2009-02-02T00:00:00.000Z"))); - - time = new MutableDateTime(DateTimeZone.UTC); - time.setRounding(time.getChronology().weekOfWeekyear(), MutableDateTime.ROUND_FLOOR); - time.setMillis(utcTimeInMillis("2011-05-05T01:01:01")); - assertThat(time.toString(), equalTo("2011-05-02T00:00:00.000Z")); - assertThat(time.getMillis(), equalTo(utcTimeInMillis("2011-05-02T00:00:00.000Z"))); - } - - public void testRoundingWithTimeZone() { - MutableDateTime time = new MutableDateTime(DateTimeZone.UTC); - time.setZone(DateTimeZone.forOffsetHours(-2)); - time.setRounding(time.getChronology().dayOfMonth(), MutableDateTime.ROUND_FLOOR); - - MutableDateTime utcTime = new MutableDateTime(DateTimeZone.UTC); - utcTime.setRounding(utcTime.getChronology().dayOfMonth(), MutableDateTime.ROUND_FLOOR); - - time.setMillis(utcTimeInMillis("2009-02-03T01:01:01")); - utcTime.setMillis(utcTimeInMillis("2009-02-03T01:01:01")); - - assertThat(time.toString(), equalTo("2009-02-02T00:00:00.000-02:00")); - assertThat(utcTime.toString(), equalTo("2009-02-03T00:00:00.000Z")); - // the time is on the 2nd, and utcTime is on the 3rd, but, because time already encapsulates - // time zone, the millis diff is not 24, but 22 hours - assertThat(time.getMillis(), equalTo(utcTime.getMillis() - TimeValue.timeValueHours(22).millis())); - - time.setMillis(utcTimeInMillis("2009-02-04T01:01:01")); - utcTime.setMillis(utcTimeInMillis("2009-02-04T01:01:01")); - assertThat(time.toString(), equalTo("2009-02-03T00:00:00.000-02:00")); - assertThat(utcTime.toString(), equalTo("2009-02-04T00:00:00.000Z")); - assertThat(time.getMillis(), equalTo(utcTime.getMillis() - 
TimeValue.timeValueHours(22).millis())); - } - - public void testThatEpochsCanBeParsed() { - boolean parseMilliSeconds = randomBoolean(); - - // epoch: 1433144433655 => date: Mon Jun 1 09:40:33.655 CEST 2015 - DateFormatter formatter = DateFormatter.forPattern(parseMilliSeconds ? "epoch_millis" : "epoch_second"); - DateTime dateTime = formatter.parseJoda(parseMilliSeconds ? "1433144433655" : "1433144433"); - - assertThat(dateTime.getYear(), is(2015)); - assertThat(dateTime.getDayOfMonth(), is(1)); - assertThat(dateTime.getMonthOfYear(), is(6)); - assertThat(dateTime.getHourOfDay(), is(7)); // utc timezone, +2 offset due to CEST - assertThat(dateTime.getMinuteOfHour(), is(40)); - assertThat(dateTime.getSecondOfMinute(), is(33)); - - if (parseMilliSeconds) { - assertThat(dateTime.getMillisOfSecond(), is(655)); - } else { - assertThat(dateTime.getMillisOfSecond(), is(0)); - } - - // test floats get truncated - String epochFloatValue = String.format(Locale.US, "%d.%d", dateTime.getMillis() / (parseMilliSeconds ? 1L : 1000L), - randomNonNegativeLong()); - assertThat(formatter.parseJoda(epochFloatValue).getMillis(), is(dateTime.getMillis())); - } - - public void testThatNegativeEpochsCanBeParsed() { - // problem: negative epochs can be arbitrary in size... - boolean parseMilliSeconds = randomBoolean(); - DateFormatter formatter = DateFormatter.forPattern(parseMilliSeconds ? "epoch_millis" : "epoch_second"); - DateTime dateTime = formatter.parseJoda("-10000"); - - assertThat(dateTime.getYear(), is(1969)); - assertThat(dateTime.getMonthOfYear(), is(12)); - assertThat(dateTime.getDayOfMonth(), is(31)); - if (parseMilliSeconds) { - assertThat(dateTime.getHourOfDay(), is(23)); // utc timezone, +2 offset due to CEST - assertThat(dateTime.getMinuteOfHour(), is(59)); - assertThat(dateTime.getSecondOfMinute(), is(50)); - } else { - assertThat(dateTime.getHourOfDay(), is(21)); // utc timezone, +2 offset due to CEST - assertThat(dateTime.getMinuteOfHour(), is(13)); - assertThat(dateTime.getSecondOfMinute(), is(20)); - } - - // test floats get truncated - String epochFloatValue = String.format(Locale.US, "%d.%d", dateTime.getMillis() / (parseMilliSeconds ? 
1L : 1000L), - randomNonNegativeLong()); - assertThat(formatter.parseJoda(epochFloatValue).getMillis(), is(dateTime.getMillis())); - - // every negative epoch must be parsed, no matter if exact the size or bigger - if (parseMilliSeconds) { - formatter.parseJoda("-100000000"); - formatter.parseJoda("-999999999999"); - formatter.parseJoda("-1234567890123"); - formatter.parseJoda("-1234567890123456789"); - - formatter.parseJoda("-1234567890123.9999"); - formatter.parseJoda("-1234567890123456789.9999"); - } else { - formatter.parseJoda("-100000000"); - formatter.parseJoda("-1234567890"); - formatter.parseJoda("-1234567890123456"); - - formatter.parseJoda("-1234567890.9999"); - formatter.parseJoda("-1234567890123456.9999"); - } - - assertWarnings("Use of negative values" + - " in epoch time formats is deprecated and will not be supported in the next major version of Elasticsearch."); - } - - public void testForInvalidDatesInEpochSecond() { - DateFormatter formatter = DateFormatter.forPattern("epoch_second"); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - formatter.parseJoda(randomFrom("invalid date", "12345678901234567", "12345678901234567890"))); - assertThat(e.getMessage(), containsString("Invalid format")); - } - - public void testForInvalidDatesInEpochMillis() { - DateFormatter formatter = DateFormatter.forPattern("epoch_millis"); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - formatter.parseJoda(randomFrom("invalid date", "12345678901234567890"))); - assertThat(e.getMessage(), containsString("Invalid format")); - } - - public void testForInvalidTimeZoneWithEpochSeconds() { - DateTimeFormatter dateTimeFormatter = new DateTimeFormatterBuilder() - .append(new Joda.EpochTimeParser(false)) - .toFormatter() - .withZone(DateTimeZone.forOffsetHours(1)) - .withLocale(Locale.ROOT); - DateFormatter formatter = - new JodaDateFormatter("epoch_seconds", dateTimeFormatter, dateTimeFormatter); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - formatter.parseJoda("1433144433655")); - assertThat(e.getMessage(), containsString("time_zone must be UTC")); - } - - public void testForInvalidTimeZoneWithEpochMillis() { - DateTimeFormatter dateTimeFormatter = new DateTimeFormatterBuilder() - .append(new Joda.EpochTimeParser(true)) - .toFormatter() - .withZone(DateTimeZone.forOffsetHours(1)) - .withLocale(Locale.ROOT); - DateFormatter formatter = - new JodaDateFormatter("epoch_millis", dateTimeFormatter, dateTimeFormatter); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> - formatter.parseJoda("1433144433")); - assertThat(e.getMessage(), containsString("time_zone must be UTC")); - } - - public void testThatEpochParserIsPrinter() { - JodaDateFormatter formatter = Joda.forPattern("epoch_millis"); - assertThat(formatter.parser.isPrinter(), is(true)); - assertThat(formatter.printer.isPrinter(), is(true)); - - JodaDateFormatter epochSecondFormatter = Joda.forPattern("epoch_second"); - assertThat(epochSecondFormatter.parser.isPrinter(), is(true)); - assertThat(epochSecondFormatter.printer.isPrinter(), is(true)); - } - - public void testThatEpochTimePrinterWorks() { - StringBuffer buffer = new StringBuffer(); - LocalDateTime now = LocalDateTime.now(); - - Joda.EpochTimePrinter epochTimePrinter = new Joda.EpochTimePrinter(false); - epochTimePrinter.printTo(buffer, now, Locale.ROOT); - assertThat(buffer.length(), is(10)); - // only check the last digit, as seconds go from 0-99 in the 
unix timestamp and don't stop at 60 - assertThat(buffer.toString(), endsWith(String.valueOf(now.getSecondOfMinute() % 10))); - - buffer = new StringBuffer(); - Joda.EpochTimePrinter epochMilliSecondTimePrinter = new Joda.EpochTimePrinter(true); - epochMilliSecondTimePrinter.printTo(buffer, now, Locale.ROOT); - assertThat(buffer.length(), is(13)); - assertThat(buffer.toString(), endsWith(String.valueOf(now.getMillisOfSecond()))); - } - - public void testThatEpochParserIsIdempotent() { - DateFormatter formatter = DateFormatter.forPattern("epoch_millis"); - DateTime dateTime = formatter.parseJoda("1234567890123"); - assertThat(dateTime.getMillis(), is(1234567890123L)); - dateTime = formatter.parseJoda("1234567890456"); - assertThat(dateTime.getMillis(), is(1234567890456L)); - dateTime = formatter.parseJoda("1234567890789"); - assertThat(dateTime.getMillis(), is(1234567890789L)); - dateTime = formatter.parseJoda("1234567890123456789"); - assertThat(dateTime.getMillis(), is(1234567890123456789L)); - - DateFormatter secondsFormatter = DateFormatter.forPattern("epoch_second"); - DateTime secondsDateTime = secondsFormatter.parseJoda("1234567890"); - assertThat(secondsDateTime.getMillis(), is(1234567890000L)); - secondsDateTime = secondsFormatter.parseJoda("1234567890"); - assertThat(secondsDateTime.getMillis(), is(1234567890000L)); - secondsDateTime = secondsFormatter.parseJoda("1234567890"); - assertThat(secondsDateTime.getMillis(), is(1234567890000L)); - secondsDateTime = secondsFormatter.parseJoda("1234567890123456"); - assertThat(secondsDateTime.getMillis(), is(1234567890123456000L)); - } - - public void testThatDefaultFormatterChecksForCorrectYearLength() throws Exception { - // if no strict version is tested, this means the date format is already strict by itself - // yyyyMMdd - assertValidDateFormatParsing("basicDate", "20140303"); - assertDateFormatParsingThrowingException("basicDate", "2010303"); - - // yyyyMMdd’T'HHmmss.SSSZ - assertValidDateFormatParsing("basicDateTime", "20140303T124343.123Z"); - assertValidDateFormatParsing("basicDateTime", "00050303T124343.123Z"); - assertDateFormatParsingThrowingException("basicDateTime", "50303T124343.123Z"); - - // yyyyMMdd’T'HHmmssZ - assertValidDateFormatParsing("basicDateTimeNoMillis", "20140303T124343Z"); - assertValidDateFormatParsing("basicDateTimeNoMillis", "00050303T124343Z"); - assertDateFormatParsingThrowingException("basicDateTimeNoMillis", "50303T124343Z"); - - // yyyyDDD - assertValidDateFormatParsing("basicOrdinalDate", "0005165"); - assertDateFormatParsingThrowingException("basicOrdinalDate", "5165"); - - // yyyyDDD’T'HHmmss.SSSZ - assertValidDateFormatParsing("basicOrdinalDateTime", "0005165T124343.123Z"); - assertValidDateFormatParsing("basicOrdinalDateTime", "0005165T124343.123Z"); - assertDateFormatParsingThrowingException("basicOrdinalDateTime", "5165T124343.123Z"); - - // yyyyDDD’T'HHmmssZ - assertValidDateFormatParsing("basicOrdinalDateTimeNoMillis", "0005165T124343Z"); - assertValidDateFormatParsing("basicOrdinalDateTimeNoMillis", "0005165T124343Z"); - assertDateFormatParsingThrowingException("basicOrdinalDateTimeNoMillis", "5165T124343Z"); - - // HHmmss.SSSZ - assertValidDateFormatParsing("basicTime", "090909.123Z"); - assertDateFormatParsingThrowingException("basicTime", "90909.123Z"); - - // HHmmssZ - assertValidDateFormatParsing("basicTimeNoMillis", "090909Z"); - assertDateFormatParsingThrowingException("basicTimeNoMillis", "90909Z"); - - // 'T’HHmmss.SSSZ - assertValidDateFormatParsing("basicTTime", "T090909.123Z"); - 
assertDateFormatParsingThrowingException("basicTTime", "T90909.123Z"); - - // T’HHmmssZ - assertValidDateFormatParsing("basicTTimeNoMillis", "T090909Z"); - assertDateFormatParsingThrowingException("basicTTimeNoMillis", "T90909Z"); - - // xxxx’W'wwe - assertValidDateFormatParsing("basicWeekDate", "0005W414"); - assertValidDateFormatParsing("basicWeekDate", "5W414", "0005W414"); - assertDateFormatParsingThrowingException("basicWeekDate", "5W14"); - - assertValidDateFormatParsing("strictBasicWeekDate", "0005W414"); - assertDateFormatParsingThrowingException("strictBasicWeekDate", "0005W47"); - assertDateFormatParsingThrowingException("strictBasicWeekDate", "5W414"); - assertDateFormatParsingThrowingException("strictBasicWeekDate", "5W14"); - - // xxxx’W'wwe’T'HHmmss.SSSZ - assertValidDateFormatParsing("basicWeekDateTime", "0005W414T124343.123Z"); - assertValidDateFormatParsing("basicWeekDateTime", "5W414T124343.123Z", "0005W414T124343.123Z"); - assertDateFormatParsingThrowingException("basicWeekDateTime", "5W14T124343.123Z"); - - assertValidDateFormatParsing("strictBasicWeekDateTime", "0005W414T124343.123Z"); - assertDateFormatParsingThrowingException("strictBasicWeekDateTime", "0005W47T124343.123Z"); - assertDateFormatParsingThrowingException("strictBasicWeekDateTime", "5W414T124343.123Z"); - assertDateFormatParsingThrowingException("strictBasicWeekDateTime", "5W14T124343.123Z"); - - // xxxx’W'wwe’T'HHmmssZ - assertValidDateFormatParsing("basicWeekDateTimeNoMillis", "0005W414T124343Z"); - assertValidDateFormatParsing("basicWeekDateTimeNoMillis", "5W414T124343Z", "0005W414T124343Z"); - assertDateFormatParsingThrowingException("basicWeekDateTimeNoMillis", "5W14T124343Z"); - - assertValidDateFormatParsing("strictBasicWeekDateTimeNoMillis", "0005W414T124343Z"); - assertDateFormatParsingThrowingException("strictBasicWeekDateTimeNoMillis", "0005W47T124343Z"); - assertDateFormatParsingThrowingException("strictBasicWeekDateTimeNoMillis", "5W414T124343Z"); - assertDateFormatParsingThrowingException("strictBasicWeekDateTimeNoMillis", "5W14T124343Z"); - - // yyyy-MM-dd - assertValidDateFormatParsing("date", "0005-06-03"); - assertValidDateFormatParsing("date", "5-6-3", "0005-06-03"); - - assertValidDateFormatParsing("strictDate", "0005-06-03"); - assertDateFormatParsingThrowingException("strictDate", "5-6-3"); - assertDateFormatParsingThrowingException("strictDate", "0005-06-3"); - assertDateFormatParsingThrowingException("strictDate", "0005-6-03"); - assertDateFormatParsingThrowingException("strictDate", "5-06-03"); - - // yyyy-MM-dd'T'HH - assertValidDateFormatParsing("dateHour", "0005-06-03T12"); - assertValidDateFormatParsing("dateHour", "5-6-3T1", "0005-06-03T01"); - - assertValidDateFormatParsing("strictDateHour", "0005-06-03T12"); - assertDateFormatParsingThrowingException("strictDateHour", "5-6-3T1"); - - // yyyy-MM-dd'T'HH:mm - assertValidDateFormatParsing("dateHourMinute", "0005-06-03T12:12"); - assertValidDateFormatParsing("dateHourMinute", "5-6-3T12:1", "0005-06-03T12:01"); - - assertValidDateFormatParsing("strictDateHourMinute", "0005-06-03T12:12"); - assertDateFormatParsingThrowingException("strictDateHourMinute", "5-6-3T12:1"); - - // yyyy-MM-dd'T'HH:mm:ss - assertValidDateFormatParsing("dateHourMinuteSecond", "0005-06-03T12:12:12"); - assertValidDateFormatParsing("dateHourMinuteSecond", "5-6-3T12:12:1", "0005-06-03T12:12:01"); - - assertValidDateFormatParsing("strictDateHourMinuteSecond", "0005-06-03T12:12:12"); - assertDateFormatParsingThrowingException("strictDateHourMinuteSecond", 
"5-6-3T12:12:1"); - - // yyyy-MM-dd’T'HH:mm:ss.SSS - assertValidDateFormatParsing("dateHourMinuteSecondFraction", "0005-06-03T12:12:12.123"); - assertValidDateFormatParsing("dateHourMinuteSecondFraction", "5-6-3T12:12:1.123", "0005-06-03T12:12:01.123"); - assertValidDateFormatParsing("dateHourMinuteSecondFraction", "5-6-3T12:12:1.1", "0005-06-03T12:12:01.100"); - - assertValidDateFormatParsing("strictDateHourMinuteSecondFraction", "0005-06-03T12:12:12.123"); - assertDateFormatParsingThrowingException("strictDateHourMinuteSecondFraction", "5-6-3T12:12:12.1"); - assertDateFormatParsingThrowingException("strictDateHourMinuteSecondFraction", "5-6-3T12:12:12.12"); - - assertValidDateFormatParsing("dateHourMinuteSecondMillis", "0005-06-03T12:12:12.123"); - assertValidDateFormatParsing("dateHourMinuteSecondMillis", "5-6-3T12:12:1.123", "0005-06-03T12:12:01.123"); - assertValidDateFormatParsing("dateHourMinuteSecondMillis", "5-6-3T12:12:1.1", "0005-06-03T12:12:01.100"); - - assertValidDateFormatParsing("strictDateHourMinuteSecondMillis", "0005-06-03T12:12:12.123"); - assertDateFormatParsingThrowingException("strictDateHourMinuteSecondMillis", "5-6-3T12:12:12.1"); - assertDateFormatParsingThrowingException("strictDateHourMinuteSecondMillis", "5-6-3T12:12:12.12"); - - // yyyy-MM-dd'T'HH:mm:ss.SSSZ - assertValidDateFormatParsing("dateOptionalTime", "2014-03-03", "2014-03-03T00:00:00.000Z"); - assertValidDateFormatParsing("dateOptionalTime", "1257-3-03", "1257-03-03T00:00:00.000Z"); - assertValidDateFormatParsing("dateOptionalTime", "0005-03-3", "0005-03-03T00:00:00.000Z"); - assertValidDateFormatParsing("dateOptionalTime", "5-03-03", "0005-03-03T00:00:00.000Z"); - assertValidDateFormatParsing("dateOptionalTime", "5-03-03T1:1:1.1", "0005-03-03T01:01:01.100Z"); - assertValidDateFormatParsing("strictDateOptionalTime", "2014-03-03", "2014-03-03T00:00:00.000Z"); - assertDateFormatParsingThrowingException("strictDateOptionalTime", "5-03-03"); - assertDateFormatParsingThrowingException("strictDateOptionalTime", "0005-3-03"); - assertDateFormatParsingThrowingException("strictDateOptionalTime", "0005-03-3"); - assertDateFormatParsingThrowingException("strictDateOptionalTime", "5-03-03T1:1:1.1"); - assertDateFormatParsingThrowingException("strictDateOptionalTime", "5-03-03T01:01:01.1"); - assertDateFormatParsingThrowingException("strictDateOptionalTime", "5-03-03T01:01:1.100"); - assertDateFormatParsingThrowingException("strictDateOptionalTime", "5-03-03T01:1:01.100"); - assertDateFormatParsingThrowingException("strictDateOptionalTime", "5-03-03T1:01:01.100"); - - // yyyy-MM-dd’T'HH:mm:ss.SSSZZ - assertValidDateFormatParsing("dateTime", "5-03-03T1:1:1.1Z", "0005-03-03T01:01:01.100Z"); - assertValidDateFormatParsing("strictDateTime", "2014-03-03T11:11:11.100Z", "2014-03-03T11:11:11.100Z"); - assertDateFormatParsingThrowingException("strictDateTime", "0005-03-03T1:1:1.1Z"); - assertDateFormatParsingThrowingException("strictDateTime", "0005-03-03T01:01:1.100Z"); - assertDateFormatParsingThrowingException("strictDateTime", "0005-03-03T01:1:01.100Z"); - assertDateFormatParsingThrowingException("strictDateTime", "0005-03-03T1:01:01.100Z"); - - // yyyy-MM-dd’T'HH:mm:ssZZ - assertValidDateFormatParsing("dateTimeNoMillis", "5-03-03T1:1:1Z", "0005-03-03T01:01:01Z"); - assertValidDateFormatParsing("strictDateTimeNoMillis", "2014-03-03T11:11:11Z", "2014-03-03T11:11:11Z"); - assertDateFormatParsingThrowingException("strictDateTimeNoMillis", "0005-03-03T1:1:1Z"); - 
assertDateFormatParsingThrowingException("strictDateTimeNoMillis", "0005-03-03T01:01:1Z"); - assertDateFormatParsingThrowingException("strictDateTimeNoMillis", "0005-03-03T01:1:01Z"); - assertDateFormatParsingThrowingException("strictDateTimeNoMillis", "0005-03-03T1:01:01Z"); - - // HH - assertValidDateFormatParsing("hour", "12"); - assertValidDateFormatParsing("hour", "1", "01"); - assertValidDateFormatParsing("strictHour", "12"); - assertValidDateFormatParsing("strictHour", "01"); - assertDateFormatParsingThrowingException("strictHour", "1"); - - // HH:mm - assertValidDateFormatParsing("hourMinute", "12:12"); - assertValidDateFormatParsing("hourMinute", "12:1", "12:01"); - assertValidDateFormatParsing("strictHourMinute", "12:12"); - assertValidDateFormatParsing("strictHourMinute", "12:01"); - assertDateFormatParsingThrowingException("strictHourMinute", "12:1"); - - // HH:mm:ss - assertValidDateFormatParsing("hourMinuteSecond", "12:12:12"); - assertValidDateFormatParsing("hourMinuteSecond", "12:12:1", "12:12:01"); - assertValidDateFormatParsing("strictHourMinuteSecond", "12:12:12"); - assertValidDateFormatParsing("strictHourMinuteSecond", "12:12:01"); - assertDateFormatParsingThrowingException("strictHourMinuteSecond", "12:12:1"); - - // HH:mm:ss.SSS - assertValidDateFormatParsing("hourMinuteSecondFraction", "12:12:12.123"); - assertValidDateFormatParsing("hourMinuteSecondFraction", "12:12:12.1", "12:12:12.100"); - assertValidDateFormatParsing("strictHourMinuteSecondFraction", "12:12:12.123"); - assertValidDateFormatParsing("strictHourMinuteSecondFraction", "12:12:12.1", "12:12:12.100"); - - assertValidDateFormatParsing("hourMinuteSecondMillis", "12:12:12.123"); - assertValidDateFormatParsing("hourMinuteSecondMillis", "12:12:12.1", "12:12:12.100"); - assertValidDateFormatParsing("strictHourMinuteSecondMillis", "12:12:12.123"); - assertValidDateFormatParsing("strictHourMinuteSecondMillis", "12:12:12.1", "12:12:12.100"); - - // yyyy-DDD - assertValidDateFormatParsing("ordinalDate", "5-3", "0005-003"); - assertValidDateFormatParsing("strictOrdinalDate", "0005-003"); - assertDateFormatParsingThrowingException("strictOrdinalDate", "5-3"); - assertDateFormatParsingThrowingException("strictOrdinalDate", "0005-3"); - assertDateFormatParsingThrowingException("strictOrdinalDate", "5-003"); - - // yyyy-DDD’T'HH:mm:ss.SSSZZ - assertValidDateFormatParsing("ordinalDateTime", "5-3T12:12:12.100Z", "0005-003T12:12:12.100Z"); - assertValidDateFormatParsing("strictOrdinalDateTime", "0005-003T12:12:12.100Z"); - assertDateFormatParsingThrowingException("strictOrdinalDateTime", "5-3T1:12:12.123Z"); - assertDateFormatParsingThrowingException("strictOrdinalDateTime", "5-3T12:1:12.123Z"); - assertDateFormatParsingThrowingException("strictOrdinalDateTime", "5-3T12:12:1.123Z"); - - // yyyy-DDD’T'HH:mm:ssZZ - assertValidDateFormatParsing("ordinalDateTimeNoMillis", "5-3T12:12:12Z", "0005-003T12:12:12Z"); - assertValidDateFormatParsing("strictOrdinalDateTimeNoMillis", "0005-003T12:12:12Z"); - assertDateFormatParsingThrowingException("strictOrdinalDateTimeNoMillis", "5-3T1:12:12Z"); - assertDateFormatParsingThrowingException("strictOrdinalDateTimeNoMillis", "5-3T12:1:12Z"); - assertDateFormatParsingThrowingException("strictOrdinalDateTimeNoMillis", "5-3T12:12:1Z"); - - - // HH:mm:ss.SSSZZ - assertValidDateFormatParsing("time", "12:12:12.100Z"); - assertValidDateFormatParsing("time", "01:01:01.1Z", "01:01:01.100Z"); - assertValidDateFormatParsing("time", "1:1:1.1Z", "01:01:01.100Z"); - 
assertValidDateFormatParsing("strictTime", "12:12:12.100Z"); - assertDateFormatParsingThrowingException("strictTime", "12:12:1.100Z"); - assertDateFormatParsingThrowingException("strictTime", "12:1:12.100Z"); - assertDateFormatParsingThrowingException("strictTime", "1:12:12.100Z"); - - // HH:mm:ssZZ - assertValidDateFormatParsing("timeNoMillis", "12:12:12Z"); - assertValidDateFormatParsing("timeNoMillis", "01:01:01Z", "01:01:01Z"); - assertValidDateFormatParsing("timeNoMillis", "1:1:1Z", "01:01:01Z"); - assertValidDateFormatParsing("strictTimeNoMillis", "12:12:12Z"); - assertDateFormatParsingThrowingException("strictTimeNoMillis", "12:12:1Z"); - assertDateFormatParsingThrowingException("strictTimeNoMillis", "12:1:12Z"); - assertDateFormatParsingThrowingException("strictTimeNoMillis", "1:12:12Z"); - - // 'T’HH:mm:ss.SSSZZ - assertValidDateFormatParsing("tTime", "T12:12:12.100Z"); - assertValidDateFormatParsing("tTime", "T01:01:01.1Z", "T01:01:01.100Z"); - assertValidDateFormatParsing("tTime", "T1:1:1.1Z", "T01:01:01.100Z"); - assertValidDateFormatParsing("strictTTime", "T12:12:12.100Z"); - assertDateFormatParsingThrowingException("strictTTime", "T12:12:1.100Z"); - assertDateFormatParsingThrowingException("strictTTime", "T12:1:12.100Z"); - assertDateFormatParsingThrowingException("strictTTime", "T1:12:12.100Z"); - - // 'T’HH:mm:ssZZ - assertValidDateFormatParsing("tTimeNoMillis", "T12:12:12Z"); - assertValidDateFormatParsing("tTimeNoMillis", "T01:01:01Z", "T01:01:01Z"); - assertValidDateFormatParsing("tTimeNoMillis", "T1:1:1Z", "T01:01:01Z"); - assertValidDateFormatParsing("strictTTimeNoMillis", "T12:12:12Z"); - assertDateFormatParsingThrowingException("strictTTimeNoMillis", "T12:12:1Z"); - assertDateFormatParsingThrowingException("strictTTimeNoMillis", "T12:1:12Z"); - assertDateFormatParsingThrowingException("strictTTimeNoMillis", "T1:12:12Z"); - - // xxxx-'W’ww-e - assertValidDateFormatParsing("weekDate", "0005-W4-1", "0005-W04-1"); - assertValidDateFormatParsing("strictWeekDate", "0005-W04-1"); - assertDateFormatParsingThrowingException("strictWeekDate", "0005-W4-1"); - - // xxxx-'W’ww-e’T'HH:mm:ss.SSSZZ - assertValidDateFormatParsing("weekDateTime", "0005-W41-4T12:43:43.123Z"); - assertValidDateFormatParsing("weekDateTime", "5-W41-4T12:43:43.123Z", "0005-W41-4T12:43:43.123Z"); - assertValidDateFormatParsing("strictWeekDateTime", "0005-W41-4T12:43:43.123Z"); - assertValidDateFormatParsing("strictWeekDateTime", "0005-W06-4T12:43:43.123Z"); - assertDateFormatParsingThrowingException("strictWeekDateTime", "0005-W4-7T12:43:43.123Z"); - assertDateFormatParsingThrowingException("strictWeekDateTime", "5-W41-4T12:43:43.123Z"); - assertDateFormatParsingThrowingException("strictWeekDateTime", "5-W1-4T12:43:43.123Z"); - - // xxxx-'W’ww-e’T'HH:mm:ssZZ - assertValidDateFormatParsing("weekDateTimeNoMillis", "0005-W41-4T12:43:43Z"); - assertValidDateFormatParsing("weekDateTimeNoMillis", "5-W41-4T12:43:43Z", "0005-W41-4T12:43:43Z"); - assertValidDateFormatParsing("strictWeekDateTimeNoMillis", "0005-W41-4T12:43:43Z"); - assertValidDateFormatParsing("strictWeekDateTimeNoMillis", "0005-W06-4T12:43:43Z"); - assertDateFormatParsingThrowingException("strictWeekDateTimeNoMillis", "0005-W4-7T12:43:43Z"); - assertDateFormatParsingThrowingException("strictWeekDateTimeNoMillis", "5-W41-4T12:43:43Z"); - assertDateFormatParsingThrowingException("strictWeekDateTimeNoMillis", "5-W1-4T12:43:43Z"); - - // yyyy - assertValidDateFormatParsing("weekyear", "2014"); - assertValidDateFormatParsing("weekyear", "5", "0005"); - 
assertValidDateFormatParsing("weekyear", "0005"); - assertValidDateFormatParsing("strictWeekyear", "2014"); - assertValidDateFormatParsing("strictWeekyear", "0005"); - assertDateFormatParsingThrowingException("strictWeekyear", "5"); - - // yyyy-'W'ee - assertValidDateFormatParsing("weekyearWeek", "2014-W41"); - assertValidDateFormatParsing("weekyearWeek", "2014-W1", "2014-W01"); - assertValidDateFormatParsing("strictWeekyearWeek", "2014-W41"); - assertDateFormatParsingThrowingException("strictWeekyearWeek", "2014-W1"); - - // weekyearWeekDay - assertValidDateFormatParsing("weekyearWeekDay", "2014-W41-1"); - assertValidDateFormatParsing("weekyearWeekDay", "2014-W1-1", "2014-W01-1"); - assertValidDateFormatParsing("strictWeekyearWeekDay", "2014-W41-1"); - assertDateFormatParsingThrowingException("strictWeekyearWeekDay", "2014-W1-1"); - - // yyyy - assertValidDateFormatParsing("year", "2014"); - assertValidDateFormatParsing("year", "5", "0005"); - assertValidDateFormatParsing("strictYear", "2014"); - assertDateFormatParsingThrowingException("strictYear", "5"); - - // yyyy-mm - assertValidDateFormatParsing("yearMonth", "2014-12"); - assertValidDateFormatParsing("yearMonth", "2014-5", "2014-05"); - assertValidDateFormatParsing("strictYearMonth", "2014-12"); - assertDateFormatParsingThrowingException("strictYearMonth", "2014-5"); - - // yyyy-mm-dd - assertValidDateFormatParsing("yearMonthDay", "2014-12-12"); - assertValidDateFormatParsing("yearMonthDay", "2014-05-5", "2014-05-05"); - assertValidDateFormatParsing("strictYearMonthDay", "2014-12-12"); - assertDateFormatParsingThrowingException("strictYearMonthDay", "2014-05-5"); - } - - public void testThatRootObjectParsingIsStrict() throws Exception { - String[] datesThatWork = new String[] { "2014/10/10", "2014/10/10 12:12:12", "2014-05-05", "2014-05-05T12:12:12.123Z" }; - String[] datesThatShouldNotWork = new String[]{ "5-05-05", "2014-5-05", "2014-05-5", - "2014-05-05T1:12:12.123Z", "2014-05-05T12:1:12.123Z", "2014-05-05T12:12:1.123Z", - "4/10/10", "2014/1/10", "2014/10/1", - "2014/10/10 1:12:12", "2014/10/10 12:1:12", "2014/10/10 12:12:1" - }; - - // good case - for (String date : datesThatWork) { - boolean dateParsingSuccessful = false; - for (DateFormatter dateTimeFormatter : RootObjectMapper.Defaults.DYNAMIC_DATE_TIME_FORMATTERS) { - try { - dateTimeFormatter.parseMillis(date); - dateParsingSuccessful = true; - break; - } catch (Exception e) {} - } - if (!dateParsingSuccessful) { - fail("Parsing for date " + date + " in root object mapper failed, but shouldnt"); - } - } - - // bad case - for (String date : datesThatShouldNotWork) { - for (DateFormatter dateTimeFormatter : RootObjectMapper.Defaults.DYNAMIC_DATE_TIME_FORMATTERS) { - try { - dateTimeFormatter.parseMillis(date); - fail(String.format(Locale.ROOT, "Expected exception when parsing date %s in root mapper", date)); - } catch (Exception e) {} - } - } - } - - public void testDeprecatedFormatSpecifiers() { - Joda.forPattern("CC"); - assertWarnings("Use of 'C' (century-of-era) is deprecated and will not be supported in the" + - " next major version of Elasticsearch."); - Joda.forPattern("YYYY"); - assertWarnings("Use of 'Y' (year-of-era) will change to 'y' in the" + - " next major version of Elasticsearch. Prefix your date format with '8' to use the new specifier."); - Joda.forPattern("xxxx"); - assertWarnings("Use of 'x' (week-based-year) will change" + - " to 'Y' in the next major version of Elasticsearch. 
Prefix your date format with '8' to use the new specifier."); - // multiple deprecations - Joda.forPattern("CC-YYYY"); - assertWarnings("Use of 'C' (century-of-era) is deprecated and will not be supported in the" + - " next major version of Elasticsearch.", "Use of 'Y' (year-of-era) will change to 'y' in the" + - " next major version of Elasticsearch. Prefix your date format with '8' to use the new specifier."); - } - - public void testDeprecatedEpochScientificNotation() { - assertValidDateFormatParsing("epoch_second", "1.234e5", "123400"); - assertWarnings("Use of scientific notation" + - " in epoch time formats is deprecated and will not be supported in the next major version of Elasticsearch."); - assertValidDateFormatParsing("epoch_millis", "1.234e5", "123400"); - assertWarnings("Use of scientific notation" + - " in epoch time formats is deprecated and will not be supported in the next major version of Elasticsearch."); - } - - public void testDeprecatedEpochNegative() { - assertValidDateFormatParsing("epoch_second", "-12345", "-12345"); - assertWarnings("Use of negative values" + - " in epoch time formats is deprecated and will not be supported in the next major version of Elasticsearch."); - assertValidDateFormatParsing("epoch_millis", "-12345", "-12345"); - assertWarnings("Use of negative values" + - " in epoch time formats is deprecated and will not be supported in the next major version of Elasticsearch."); - } - - private void assertValidDateFormatParsing(String pattern, String dateToParse) { - assertValidDateFormatParsing(pattern, dateToParse, dateToParse); - } - - private void assertValidDateFormatParsing(String pattern, String dateToParse, String expectedDate) { - DateFormatter formatter = DateFormatter.forPattern(pattern); - assertThat(formatter.formatMillis(formatter.parseMillis(dateToParse)), is(expectedDate)); - } - - private void assertDateFormatParsingThrowingException(String pattern, String invalidDate) { - try { - DateFormatter formatter = DateFormatter.forPattern(pattern); - formatter.parseMillis(invalidDate); - fail(String.format(Locale.ROOT, "Expected parsing exception for pattern [%s] with date [%s], but did not happen", - pattern, invalidDate)); - } catch (IllegalArgumentException e) { - } - } - - private long utcTimeInMillis(String time) { - return ISODateTimeFormat.dateOptionalTimeParser().withZone(DateTimeZone.UTC).parseMillis(time); - } - -} diff --git a/server/src/test/java/org/elasticsearch/common/rounding/RoundingDuelTests.java b/server/src/test/java/org/elasticsearch/common/rounding/RoundingDuelTests.java index 7e3dbdd5b94df..3ee4ce0e7d7bf 100644 --- a/server/src/test/java/org/elasticsearch/common/rounding/RoundingDuelTests.java +++ b/server/src/test/java/org/elasticsearch/common/rounding/RoundingDuelTests.java @@ -19,9 +19,11 @@ package org.elasticsearch.common.rounding; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import java.time.ZoneOffset; @@ -42,6 +44,7 @@ public void testSerialization() throws Exception { rounding = org.elasticsearch.common.Rounding.builder(timeValue()).timeZone(ZoneOffset.UTC).build(); } BytesStreamOutput output = new BytesStreamOutput(); + output.setVersion(VersionUtils.getPreviousVersion(Version.V_7_0_0)); rounding.writeTo(output); Rounding roundingJoda = Rounding.Streams.read(output.bytes().streamInput());
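The deprecation warnings asserted above capture the migration path away from Joda patterns: 'C' disappears, and 'Y' and 'x' change meaning under java.time, so a format string can opt into the java.time interpretation early by prefixing it with '8'. A minimal sketch of the two entry points (illustrative only, not part of this patch; Joda.forPattern and the '8' prefix are used exactly this way elsewhere in this diff):

    import org.elasticsearch.common.joda.Joda;
    import org.elasticsearch.common.time.DateFormatter;

    public class FormatterSelectionSketch {
        public static void main(String[] args) {
            // Joda-backed formatter; patterns like 'YYYY' trigger the deprecation
            // warnings asserted in the tests above.
            DateFormatter jodaFormatter = Joda.forPattern("YYYY-MM-dd");

            // The '8' prefix opts into the java.time implementation, where the
            // pattern letters already carry their java.time meaning.
            DateFormatter javaTimeFormatter = DateFormatter.forPattern("8yyyy-MM-dd");
            System.out.println(jodaFormatter.pattern() + " / " + javaTimeFormatter.pattern());
        }
    }

diff --git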
a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java index feb406c61c966..96ef39e430178 100644 --- a/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java +++ b/server/src/test/java/org/elasticsearch/common/time/DateFormattersTests.java @@ -25,7 +25,6 @@ import java.time.ZoneId; import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; -import java.time.format.DateTimeParseException; import java.time.temporal.ChronoField; import java.time.temporal.TemporalAccessor; import java.util.Locale; @@ -57,13 +56,13 @@ public void testEpochMillisParser() { } } - public void testEpochMilliParser() { + public void testInvalidEpochMilliParser() { DateFormatter formatter = DateFormatters.forPattern("epoch_millis"); - DateTimeParseException e = expectThrows(DateTimeParseException.class, () -> formatter.parse("invalid")); - assertThat(e.getMessage(), containsString("could not be parsed")); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("invalid")); + assertThat(e.getMessage(), containsString("failed to parse date field [invalid] with format [epoch_millis]")); - e = expectThrows(DateTimeParseException.class, () -> formatter.parse("123.1234567")); - assertThat(e.getMessage(), containsString("unparsed text found at index 3")); + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("123.1234567")); + assertThat(e.getMessage(), containsString("failed to parse date field [123.1234567] with format [epoch_millis]")); } // this is not in the duelling tests, because the epoch second parser in joda time drops the milliseconds after the comma @@ -72,14 +71,14 @@ public void testEpochMilliParser() { public void testEpochSecondParser() { DateFormatter formatter = DateFormatters.forPattern("epoch_second"); - DateTimeParseException e = expectThrows(DateTimeParseException.class, () -> formatter.parse("1234.1")); - assertThat(e.getMessage(), is("Text '1234.1' could not be parsed, unparsed text found at index 4")); - e = expectThrows(DateTimeParseException.class, () -> formatter.parse("1234.")); - assertThat(e.getMessage(), is("Text '1234.' 
could not be parsed, unparsed text found at index 4")); - e = expectThrows(DateTimeParseException.class, () -> formatter.parse("abc")); - assertThat(e.getMessage(), is("Text 'abc' could not be parsed, unparsed text found at index 0")); - e = expectThrows(DateTimeParseException.class, () -> formatter.parse("1234.abc")); - assertThat(e.getMessage(), is("Text '1234.abc' could not be parsed, unparsed text found at index 4")); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("1234.1234567890")); + assertThat(e.getMessage(), is("failed to parse date field [1234.1234567890] with format [epoch_second]")); + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("1234.123456789013221")); + assertThat(e.getMessage(), containsString("[1234.123456789013221]")); + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("abc")); + assertThat(e.getMessage(), containsString("[abc]")); + e = expectThrows(IllegalArgumentException.class, () -> formatter.parse("1234.abc")); + assertThat(e.getMessage(), containsString("[1234.abc]")); } public void testEpochMilliParsersWithDifferentFormatters() { @@ -139,7 +138,7 @@ public void testEqualsAndHashcode() { assertThat(epochMillisFormatter, equalTo(DateFormatters.forPattern("epoch_millis"))); } - public void testForceJava8() { + public void testSupportBackwardsJava8Format() { assertThat(DateFormatter.forPattern("8yyyy-MM-dd"), instanceOf(JavaDateFormatter.class)); // named formats too assertThat(DateFormatter.forPattern("8date_optional_time"), instanceOf(JavaDateFormatter.class)); @@ -161,27 +160,29 @@ public void testParsingStrictNanoDates() { } public void testRoundupFormatterWithEpochDates() { - assertRoundupFormatter("8epoch_millis", "1234567890", 1234567890L); + assertRoundupFormatter("epoch_millis", "1234567890", 1234567890L); // also check nanos of the epoch_millis formatter if it is rounded up to the nano second DateTimeFormatter roundUpFormatter = ((JavaDateFormatter) DateFormatter.forPattern("8epoch_millis")).getRoundupParser(); Instant epochMilliInstant = DateFormatters.toZonedDateTime(roundUpFormatter.parse("1234567890")).toInstant(); assertThat(epochMilliInstant.getLong(ChronoField.NANO_OF_SECOND), is(890_999_999L)); - assertRoundupFormatter("8strict_date_optional_time||epoch_millis", "2018-10-10T12:13:14.123Z", 1539173594123L); - assertRoundupFormatter("8strict_date_optional_time||epoch_millis", "1234567890", 1234567890L); - assertRoundupFormatter("8uuuu-MM-dd'T'HH:mm:ss.SSS||epoch_millis", "2018-10-10T12:13:14.123", 1539173594123L); - assertRoundupFormatter("8uuuu-MM-dd'T'HH:mm:ss.SSS||epoch_millis", "1234567890", 1234567890L); + assertRoundupFormatter("strict_date_optional_time||epoch_millis", "2018-10-10T12:13:14.123Z", 1539173594123L); + assertRoundupFormatter("strict_date_optional_time||epoch_millis", "1234567890", 1234567890L); + assertRoundupFormatter("strict_date_optional_time||epoch_millis", "2018-10-10", 1539215999999L); + assertRoundupFormatter("uuuu-MM-dd'T'HH:mm:ss.SSS||epoch_millis", "2018-10-10T12:13:14.123", 1539173594123L); + assertRoundupFormatter("uuuu-MM-dd'T'HH:mm:ss.SSS||epoch_millis", "1234567890", 1234567890L); - assertRoundupFormatter("8epoch_second", "1234567890", 1234567890999L); + assertRoundupFormatter("epoch_second", "1234567890", 1234567890999L); // also check nanos of the epoch_millis formatter if it is rounded up to the nano second DateTimeFormatter epochSecondRoundupParser = ((JavaDateFormatter) 
DateFormatter.forPattern("8epoch_second")).getRoundupParser(); Instant epochSecondInstant = DateFormatters.toZonedDateTime(epochSecondRoundupParser.parse("1234567890")).toInstant(); assertThat(epochSecondInstant.getLong(ChronoField.NANO_OF_SECOND), is(999_999_999L)); - assertRoundupFormatter("8strict_date_optional_time||epoch_second", "2018-10-10T12:13:14.123Z", 1539173594123L); - assertRoundupFormatter("8strict_date_optional_time||epoch_second", "1234567890", 1234567890999L); - assertRoundupFormatter("8uuuu-MM-dd'T'HH:mm:ss.SSS||epoch_second", "2018-10-10T12:13:14.123", 1539173594123L); - assertRoundupFormatter("8uuuu-MM-dd'T'HH:mm:ss.SSS||epoch_second", "1234567890", 1234567890999L); + assertRoundupFormatter("strict_date_optional_time||epoch_second", "2018-10-10T12:13:14.123Z", 1539173594123L); + assertRoundupFormatter("strict_date_optional_time||epoch_second", "1234567890", 1234567890999L); + assertRoundupFormatter("strict_date_optional_time||epoch_second", "2018-10-10", 1539215999999L); + assertRoundupFormatter("uuuu-MM-dd'T'HH:mm:ss.SSS||epoch_second", "2018-10-10T12:13:14.123", 1539173594123L); + assertRoundupFormatter("uuuu-MM-dd'T'HH:mm:ss.SSS||epoch_second", "1234567890", 1234567890999L); } private void assertRoundupFormatter(String format, String input, long expectedMilliSeconds) { @@ -194,8 +195,8 @@ private void assertRoundupFormatter(String format, String input, long expectedMi public void testRoundupFormatterZone() { ZoneId zoneId = randomZone(); - String format = randomFrom("8epoch_second", "8epoch_millis", "8strict_date_optional_time", "8uuuu-MM-dd'T'HH:mm:ss.SSS", - "8strict_date_optional_time||date_optional_time"); + String format = randomFrom("epoch_second", "epoch_millis", "strict_date_optional_time", "uuuu-MM-dd'T'HH:mm:ss.SSS", + "strict_date_optional_time||date_optional_time"); JavaDateFormatter formatter = (JavaDateFormatter) DateFormatter.forPattern(format).withZone(zoneId); DateTimeFormatter roundUpFormatter = formatter.getRoundupParser(); assertThat(roundUpFormatter.getZone(), is(zoneId)); @@ -204,8 +205,8 @@ public void testRoundupFormatterZone() { public void testRoundupFormatterLocale() { Locale locale = randomLocale(random()); - String format = randomFrom("8epoch_second", "8epoch_millis", "8strict_date_optional_time", "8uuuu-MM-dd'T'HH:mm:ss.SSS", - "8strict_date_optional_time||date_optional_time"); + String format = randomFrom("epoch_second", "epoch_millis", "strict_date_optional_time", "uuuu-MM-dd'T'HH:mm:ss.SSS", + "strict_date_optional_time||date_optional_time"); JavaDateFormatter formatter = (JavaDateFormatter) DateFormatter.forPattern(format).withLocale(locale); DateTimeFormatter roundupParser = formatter.getRoundupParser(); assertThat(roundupParser.getLocale(), is(locale)); diff --git a/server/src/test/java/org/elasticsearch/common/time/JavaDateMathParserTests.java b/server/src/test/java/org/elasticsearch/common/time/JavaDateMathParserTests.java index 8d702ebee8388..2b8d89bc68bae 100644 --- a/server/src/test/java/org/elasticsearch/common/time/JavaDateMathParserTests.java +++ b/server/src/test/java/org/elasticsearch/common/time/JavaDateMathParserTests.java @@ -39,8 +39,6 @@ public class JavaDateMathParserTests extends ESTestCase { private final DateMathParser parser = formatter.toDateMathParser(); public void testBasicDates() { - assertDateMathEquals("2014", "2014-01-01T00:00:00.000"); - assertDateMathEquals("2014-05", "2014-05-01T00:00:00.000"); assertDateMathEquals("2014-05-30", "2014-05-30T00:00:00.000"); assertDateMathEquals("2014-05-30T20", 
"2014-05-30T20:00:00.000"); assertDateMathEquals("2014-05-30T20:21", "2014-05-30T20:21:00.000"); @@ -125,7 +123,7 @@ public void testMultipleAdjustments() { } public void testNow() { - final long now = parser.parse("2014-11-18T14:27:32", () -> 0, false, (ZoneId) null); + final long now = parser.parse("2014-11-18T14:27:32", () -> 0, false, (ZoneId) null).toEpochMilli(); assertDateMathEquals("now", "2014-11-18T14:27:32", now, false, null); assertDateMathEquals("now+M", "2014-12-18T14:27:32", now, false, null); @@ -142,11 +140,11 @@ public void testRoundingPreservesEpochAsBaseDate() { DateMathParser parser = formatter.toDateMathParser(); ZonedDateTime zonedDateTime = DateFormatters.toZonedDateTime(formatter.parse("04:52:20")); assertThat(zonedDateTime.getYear(), is(1970)); - long millisStart = zonedDateTime.toInstant().toEpochMilli(); + Instant millisStart = zonedDateTime.toInstant(); assertEquals(millisStart, parser.parse("04:52:20", () -> 0, false, (ZoneId) null)); // due to rounding up, we have to add the number of milliseconds here manually long millisEnd = DateFormatters.toZonedDateTime(formatter.parse("04:52:20")).toInstant().toEpochMilli() + 999; - assertEquals(millisEnd, parser.parse("04:52:20", () -> 0, true, (ZoneId) null)); + assertEquals(millisEnd, parser.parse("04:52:20", () -> 0, true, (ZoneId) null).toEpochMilli()); } // Implicit rounding happening when parts of the date are not specified @@ -166,9 +164,10 @@ public void testImplicitRounding() { // implicit rounding with explicit timezone in the date format DateFormatter formatter = DateFormatters.forPattern("yyyy-MM-ddXXX"); DateMathParser parser = formatter.toDateMathParser(); - long time = parser.parse("2011-10-09+01:00", () -> 0, false, (ZoneId) null); + Instant time = parser.parse("2011-10-09+01:00", () -> 0, false, (ZoneId) null); assertEquals(this.parser.parse("2011-10-09T00:00:00.000+01:00", () -> 0), time); - time = parser.parse("2011-10-09+01:00", () -> 0, true, (ZoneId) null); + time = DateFormatter.forPattern("strict_date_optional_time_nanos").toDateMathParser() + .parse("2011-10-09T23:59:59.999+01:00", () -> 0, false, (ZoneId) null); assertEquals(this.parser.parse("2011-10-09T23:59:59.999+01:00", () -> 0), time); } @@ -176,7 +175,6 @@ public void testImplicitRounding() { public void testExplicitRounding() { assertDateMathEquals("2014-11-18||/y", "2014-01-01", 0, false, null); assertDateMathEquals("2014-11-18||/y", "2014-12-31T23:59:59.999", 0, true, null); - assertDateMathEquals("2014||/y", "2014-01-01", 0, false, null); assertDateMathEquals("2014-01-01T00:00:00.001||/y", "2014-12-31T23:59:59.999", 0, true, null); // rounding should also take into account time zone assertDateMathEquals("2014-11-18||/y", "2013-12-31T23:00:00.000Z", 0, false, ZoneId.of("CET")); @@ -239,16 +237,16 @@ public void testTimestamps() { assertDateMathEquals("1418248078000||/m", "2014-12-10T21:47:00.000"); // also check other time units - DateMathParser parser = DateFormatter.forPattern("8epoch_second||dateOptionalTime").toDateMathParser(); - long datetime = parser.parse("1418248078", () -> 0); + DateMathParser parser = DateFormatter.forPattern("epoch_second||dateOptionalTime").toDateMathParser(); + long datetime = parser.parse("1418248078", () -> 0).toEpochMilli(); assertDateEquals(datetime, "1418248078", "2014-12-10T21:47:58.000"); // a timestamp before 10000 is a year - assertDateMathEquals("9999", "9999-01-01T00:00:00.000"); + assertDateMathEquals("9999", "1970-01-01T00:00:09.999Z"); // 10000 is also a year, breaking bwc, used to be a 
timestamp - assertDateMathEquals("10000", "10000-01-01T00:00:00.000"); + assertDateMathEquals("10000", "1970-01-01T00:00:10.000Z"); // but 10000 with T is still a date format - assertDateMathEquals("10000T", "10000-01-01T00:00:00.000"); + assertDateMathEquals("10000-01-01T", "10000-01-01T00:00:00.000"); } void assertParseException(String msg, String date, String exc) { @@ -266,7 +264,7 @@ public void testIllegalMathFormat() { public void testIllegalDateFormat() { assertParseException("Expected bad timestamp exception", Long.toString(Long.MAX_VALUE) + "0", "failed to parse date field"); - assertParseException("Expected bad date format exception", "123bogus", "Unrecognized chars at the end of [123bogus]"); + assertParseException("Expected bad date format exception", "123bogus", "failed to parse date field [123bogus]"); } public void testOnlyCallsNowIfNecessary() { @@ -286,12 +284,12 @@ private void assertDateMathEquals(String toTest, String expected) { } private void assertDateMathEquals(String toTest, String expected, final long now, boolean roundUp, ZoneId timeZone) { - long gotMillis = parser.parse(toTest, () -> now, roundUp, timeZone); + long gotMillis = parser.parse(toTest, () -> now, roundUp, timeZone).toEpochMilli(); assertDateEquals(gotMillis, toTest, expected); } private void assertDateEquals(long gotMillis, String original, String expected) { - long expectedMillis = parser.parse(expected, () -> 0); + long expectedMillis = parser.parse(expected, () -> 0).toEpochMilli(); if (gotMillis != expectedMillis) { ZonedDateTime zonedDateTime = ZonedDateTime.ofInstant(Instant.ofEpochMilli(gotMillis), ZoneOffset.UTC); fail("Date math not equal\n" + diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java index 8b437d25a8495..38b3d5a2f1ff2 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldMapperTests.java @@ -21,21 +21,23 @@ import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexableField; +import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; -import org.joda.time.format.DateTimeFormat; import org.junit.Before; import java.io.IOException; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; import java.util.Collection; import static org.hamcrest.Matchers.containsString; @@ -173,7 +175,8 @@ public void testIgnoreMalformed() throws Exception { .endObject()), XContentType.JSON)); MapperParsingException e = expectThrows(MapperParsingException.class, runnable); - assertThat(e.getCause().getMessage(), containsString("Cannot parse \"2016-03-99\"")); + assertThat(e.getCause().getMessage(), + containsString("failed to parse date field [2016-03-99] with format [strict_date_optional_time||epoch_millis]")); mapping = 
Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field").field("type", "date") @@ -217,36 +220,13 @@ public void testChangeFormat() throws IOException { assertEquals(1457654400000L, pointField.numericValue().longValue()); } - public void testFloatEpochFormat() throws IOException { - String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("properties").startObject("field").field("type", "date") - .field("format", "epoch_millis").endObject().endObject() - .endObject().endObject()); - - DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); - - assertEquals(mapping, mapper.mappingSource().toString()); - - long epochMillis = randomNonNegativeLong(); - String epochFloatValue = epochMillis + "." + randomIntBetween(0, 999); - - ParsedDocument doc = mapper.parse(new SourceToParse("test", "type", "1", BytesReference - .bytes(XContentFactory.jsonBuilder() - .startObject() - .field("field", epochFloatValue) - .endObject()), - XContentType.JSON)); - - IndexableField[] fields = doc.rootDoc().getFields("field"); - assertEquals(2, fields.length); - IndexableField pointField = fields[0]; - assertEquals(epochMillis, pointField.numericValue().longValue()); - } - public void testChangeLocale() throws IOException { + assumeTrue("need java 9 for testing ",JavaVersion.current().compareTo(JavaVersion.parse("9")) >= 0); String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("properties").startObject("field").field("type", "date").field("locale", "fr").endObject().endObject() - .endObject().endObject()); + .startObject("properties").startObject("field").field("type", "date") + .field("format", "E, d MMM yyyy HH:mm:ss Z") + .field("locale", "de") + .endObject().endObject().endObject().endObject()); DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); @@ -255,7 +235,7 @@ public void testChangeLocale() throws IOException { mapper.parse(new SourceToParse("test", "type", "1", BytesReference .bytes(XContentFactory.jsonBuilder() .startObject() - .field("field", 1457654400) + .field("field", "Mi., 06 Dez. 2000 02:55:00 -0800") .endObject()), XContentType.JSON)); } @@ -340,12 +320,8 @@ public void testEmptyName() throws IOException { assertThat(e.getMessage(), containsString("name cannot be empty string")); } - /** - * Test that time zones are correctly parsed by the {@link DateFieldMapper}. - * There is a known bug with Joda 2.9.4 reported in https://github.com/JodaOrg/joda-time/issues/373. - */ public void testTimeZoneParsing() throws Exception { - final String timeZonePattern = "yyyy-MM-dd" + randomFrom("ZZZ", "[ZZZ]", "'['ZZZ']'"); + final String timeZonePattern = "yyyy-MM-dd" + randomFrom("XXX", "[XXX]", "'['XXX']'"); String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() .startObject("type") @@ -360,20 +336,22 @@ public void testTimeZoneParsing() throws Exception { DocumentMapper mapper = parser.parse("type", new CompressedXContent(mapping)); assertEquals(mapping, mapper.mappingSource().toString()); - final DateTimeZone randomTimeZone = randomBoolean() ? DateTimeZone.forID(randomFrom("UTC", "CET")) : randomDateTimeZone(); - final DateTime randomDate = new DateTime(2016, 03, 11, 0, 0, 0, randomTimeZone); + DateFormatter formatter = DateFormatter.forPattern(timeZonePattern); + final ZoneId randomTimeZone = randomBoolean() ? 
ZoneId.of(randomFrom("UTC", "CET")) : randomZone(); + final ZonedDateTime randomDate = ZonedDateTime.of(2016, 3, 11, 0, 0, 0, 0, randomTimeZone); ParsedDocument doc = mapper.parse(new SourceToParse("test", "type", "1", BytesReference .bytes(XContentFactory.jsonBuilder() .startObject() - .field("field", DateTimeFormat.forPattern(timeZonePattern).print(randomDate)) + .field("field", formatter.format(randomDate)) .endObject()), XContentType.JSON)); IndexableField[] fields = doc.rootDoc().getFields("field"); assertEquals(2, fields.length); - assertEquals(randomDate.withZone(DateTimeZone.UTC).getMillis(), fields[0].numericValue().longValue()); + long millis = randomDate.withZoneSameInstant(ZoneOffset.UTC).toInstant().toEpochMilli(); + assertEquals(millis, fields[0].numericValue().longValue()); } public void testMergeDate() throws IOException { @@ -429,6 +407,6 @@ public void testIllegalFormatField() throws Exception { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> parser.parse("type", new CompressedXContent(mapping))); - assertEquals("Invalid format: [[test_format]]: expected string value", e.getMessage()); + assertEquals("Invalid format: [[test_format]]: Unknown pattern letter: t", e.getMessage()); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java index 072170aff09dd..d4058d50f74a2 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DateFieldTypeTests.java @@ -33,6 +33,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.time.DateMathParser; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.index.IndexSettings; @@ -45,6 +46,7 @@ import org.junit.Before; import java.io.IOException; +import java.time.ZoneOffset; import java.util.Locale; public class DateFieldTypeTests extends FieldTypeTestCase { @@ -67,7 +69,7 @@ public void modify(MappedFieldType ft) { addModifier(new Modifier("locale", false) { @Override public void modify(MappedFieldType ft) { - ((DateFieldType) ft).setDateTimeFormatter(DateFormatter.forPattern("date_optional_time").withLocale(Locale.CANADA)); + ((DateFieldType) ft).setDateTimeFormatter(DateFormatter.forPattern("strict_date_optional_time").withLocale(Locale.CANADA)); } }); nowInMillis = randomNonNegativeLong(); @@ -110,8 +112,10 @@ private void doTestIsFieldWithinQuery(DateFieldType ft, DirectoryReader reader, public void testIsFieldWithinQuery() throws IOException { Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null)); - long instant1 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda("2015-10-12").getMillis(); - long instant2 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda("2016-04-03").getMillis(); + long instant1 = + DateFormatters.toZonedDateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse("2015-10-12")).toInstant().toEpochMilli(); + long instant2 = + DateFormatters.toZonedDateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse("2016-04-03")).toInstant().toEpochMilli(); Document doc = new Document(); LongPoint field = new LongPoint("my_date", instant1); doc.add(field); @@ -138,25 +142,27 @@ public void testIsFieldWithinQuery() throws 
IOException { public void testValueFormat() { MappedFieldType ft = createDefaultFieldType(); - long instant = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda("2015-10-12T14:10:55").getMillis(); + long instant = DateFormatters.toZonedDateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse("2015-10-12T14:10:55")) + .toInstant().toEpochMilli(); + assertEquals("2015-10-12T14:10:55.000Z", - ft.docValueFormat(null, DateTimeZone.UTC).format(instant)); + ft.docValueFormat(null, ZoneOffset.UTC).format(instant)); assertEquals("2015-10-12T15:10:55.000+01:00", - ft.docValueFormat(null, DateTimeZone.forOffsetHours(1)).format(instant)); + ft.docValueFormat(null, ZoneOffset.ofHours(1)).format(instant)); assertEquals("2015", - createDefaultFieldType().docValueFormat("yyyy", DateTimeZone.UTC).format(instant)); + createDefaultFieldType().docValueFormat("YYYY", ZoneOffset.UTC).format(instant)); assertEquals(instant, - ft.docValueFormat(null, DateTimeZone.UTC).parseLong("2015-10-12T14:10:55", false, null)); + ft.docValueFormat(null, ZoneOffset.UTC).parseLong("2015-10-12T14:10:55", false, null)); assertEquals(instant + 999, - ft.docValueFormat(null, DateTimeZone.UTC).parseLong("2015-10-12T14:10:55", true, null)); - assertEquals(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda("2015-10-13").getMillis() - 1, - ft.docValueFormat(null, DateTimeZone.UTC).parseLong("2015-10-12||/d", true, null)); + ft.docValueFormat(null, ZoneOffset.UTC).parseLong("2015-10-12T14:10:55", true, null)); + long i = DateFormatters.toZonedDateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse("2015-10-13")).toInstant().toEpochMilli(); + assertEquals(i - 1, ft.docValueFormat(null, ZoneOffset.UTC).parseLong("2015-10-12||/d", true, null)); } public void testValueForSearch() { MappedFieldType ft = createDefaultFieldType(); String date = "2015-10-12T12:09:55.000Z"; - long instant = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda(date).getMillis(); + long instant = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis(date); assertEquals(date, ft.valueForDisplay(instant)); } @@ -170,7 +176,7 @@ public void testTermQuery() { MappedFieldType ft = createDefaultFieldType(); ft.setName("field"); String date = "2015-10-12T14:10:55"; - long instant = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda(date).getMillis(); + long instant = DateFormatters.toZonedDateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date)).toInstant().toEpochMilli(); ft.setIndexOptions(IndexOptions.DOCS); Query expected = new IndexOrDocValuesQuery( LongPoint.newRangeQuery("field", instant, instant + 999), @@ -193,8 +199,9 @@ public void testRangeQuery() throws IOException { ft.setName("field"); String date1 = "2015-10-12T14:10:55"; String date2 = "2016-04-28T11:33:52"; - long instant1 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda(date1).getMillis(); - long instant2 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda(date2).getMillis() + 999; + long instant1 = DateFormatters.toZonedDateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date1)).toInstant().toEpochMilli(); + long instant2 = + DateFormatters.toZonedDateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date2)).toInstant().toEpochMilli() + 999; ft.setIndexOptions(IndexOptions.DOCS); Query expected = new IndexOrDocValuesQuery( LongPoint.newRangeQuery("field", instant1, instant2), diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java index 
56e6f5e4c6b04..b3539d9994334 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java @@ -42,6 +42,7 @@ import org.elasticsearch.test.InternalSettingsPlugin; import java.io.IOException; +import java.time.Instant; import java.util.Collection; import java.util.Collections; @@ -455,7 +456,7 @@ public void testReuseExistingMappings() throws IOException, Exception { .field("my_field3", 44) .field("my_field4", 45) .field("my_field5", 46) - .field("my_field6", 47) + .field("my_field6", Instant.now().toEpochMilli()) .field("my_field7", true) .endObject()); Mapper myField1Mapper = null; diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java index ff44dec81d962..0b066fbd7162d 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicTemplatesTests.java @@ -60,8 +60,6 @@ public void testMatchTypeOnly() throws Exception { assertThat(mapperService.fullName("l"), notNullValue()); assertNotSame(IndexOptions.NONE, mapperService.fullName("l").indexOptions()); - - } public void testSimple() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java index fcb78f66add5e..65dcd396ed740 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldMapperTests.java @@ -458,7 +458,7 @@ public void testIllegalFormatField() throws Exception { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> parser.parse("type", new CompressedXContent(mapping))); - assertEquals("Invalid format: [[test_format]]: expected string value", e.getMessage()); + assertEquals("Invalid format: [[test_format]]: Unknown pattern letter: t", e.getMessage()); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java index 34e7081d51d5d..699f85f1b12b1 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldQueryStringQueryBuilderTests.java @@ -104,11 +104,12 @@ public void testDateRangeQuery() throws Exception { DateMathParser parser = type.dateMathParser; Query query = new QueryStringQueryBuilder(DATE_RANGE_FIELD_NAME + ":[2010-01-01 TO 2018-01-01]").toQuery(createShardContext()); Query range = LongRange.newIntersectsQuery(DATE_RANGE_FIELD_NAME, - new long[]{ parser.parse("2010-01-01", () -> 0)}, new long[]{ parser.parse("2018-01-01", () -> 0)}); + new long[]{ parser.parse("2010-01-01", () -> 0).toEpochMilli()}, + new long[]{ parser.parse("2018-01-01", () -> 0).toEpochMilli()}); Query dv = RangeFieldMapper.RangeType.DATE.dvRangeQuery(DATE_RANGE_FIELD_NAME, BinaryDocValuesRangeQuery.QueryType.INTERSECTS, - parser.parse("2010-01-01", () -> 0), - parser.parse("2018-01-01", () -> 0), true, true); + parser.parse("2010-01-01", () -> 0).toEpochMilli(), + parser.parse("2018-01-01", () -> 0).toEpochMilli(), true, true); assertEquals(new IndexOrDocValuesQuery(range, dv), query); } 
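The .toEpochMilli() calls added in this hunk and the surrounding ones follow from DateMathParser.parse now returning a java.time.Instant instead of a long, so callers convert explicitly where they still need epoch milliseconds. A minimal sketch of the new calling convention (illustrative only, not part of this patch; signature as used by the tests in this diff):

    import java.time.Instant;
    import java.time.ZoneId;
    import org.elasticsearch.common.time.DateFormatter;
    import org.elasticsearch.common.time.DateMathParser;

    public class DateMathInstantSketch {
        public static void main(String[] args) {
            DateMathParser parser =
                DateFormatter.forPattern("strict_date_optional_time||epoch_millis").toDateMathParser();
            // parse(expression, nowSupplier, roundUp, timeZone) now returns an Instant.
            Instant instant = parser.parse("2014-11-18||/y", () -> 0, true, (ZoneId) null);
            long millis = instant.toEpochMilli(); // convert only where epoch millis are still needed
            System.out.println(millis);
        }
    }

diff --git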
a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java index f04a193ef96b2..6ca98fb4db6d2 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/RangeFieldTypeTests.java @@ -48,6 +48,8 @@ import java.net.InetAddress; import java.util.Locale; +import static org.hamcrest.Matchers.containsString; + public class RangeFieldTypeTests extends FieldTypeTestCase { RangeType type; protected static String FIELDNAME = "field"; @@ -111,17 +113,18 @@ public void testDateRangeQueryUsingMappingFormat() { fieldType.setHasDocValues(false); ShapeRelation relation = randomFrom(ShapeRelation.values()); - // dates will break the default format + // dates will break the default format, month/day of month is turned around in the format final String from = "2016-15-06T15:29:50+08:00"; final String to = "2016-16-06T15:29:50+08:00"; ElasticsearchParseException ex = expectThrows(ElasticsearchParseException.class, () -> fieldType.rangeQuery(from, to, true, true, relation, null, null, context)); - assertEquals("failed to parse date field [2016-15-06T15:29:50+08:00] with format [strict_date_optional_time||epoch_millis]", - ex.getMessage()); + assertThat(ex.getMessage(), + containsString("failed to parse date field [2016-15-06T15:29:50+08:00] with format [strict_date_optional_time||epoch_millis]") + ); // setting mapping format which is compatible with those dates - final DateFormatter formatter = DateFormatter.forPattern("yyyy-dd-MM'T'HH:mm:ssZZ"); + final DateFormatter formatter = DateFormatter.forPattern("yyyy-dd-MM'T'HH:mm:ssZZZZZ"); assertEquals(1465975790000L, formatter.parseMillis(from)); assertEquals(1466062190000L, formatter.parseMillis(to)); diff --git a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java index 0eb6de7da252f..6f72277007dd5 100644 --- a/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/QueryStringQueryBuilderTests.java @@ -64,9 +64,10 @@ import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; import org.hamcrest.Matchers; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.DateTimeException; +import java.time.ZoneId; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -177,7 +178,7 @@ protected QueryStringQueryBuilder doCreateTestQueryBuilder() { queryStringQueryBuilder.minimumShouldMatch(randomMinimumShouldMatch()); } if (randomBoolean()) { - queryStringQueryBuilder.timeZone(randomDateTimeZone().getID()); + queryStringQueryBuilder.timeZone(randomZone().getId()); } if (randomBoolean()) { queryStringQueryBuilder.autoGenerateSynonymsPhraseQuery(randomBoolean()); @@ -211,7 +212,7 @@ public QueryStringQueryBuilder mutateInstance(QueryStringQueryBuilder instance) String quoteFieldSuffix = instance.quoteFieldSuffix(); Float tieBreaker = instance.tieBreaker(); String minimumShouldMatch = instance.minimumShouldMatch(); - String timeZone = instance.timeZone() == null ? null : instance.timeZone().getID(); + String timeZone = instance.timeZone() == null ? 
null : instance.timeZone().getId(); boolean autoGenerateSynonymsPhraseQuery = instance.autoGenerateSynonymsPhraseQuery(); boolean fuzzyTranspositions = instance.fuzzyTranspositions(); @@ -319,12 +320,12 @@ public QueryStringQueryBuilder mutateInstance(QueryStringQueryBuilder instance) break; case 20: if (timeZone == null) { - timeZone = randomDateTimeZone().getID(); + timeZone = randomZone().getId(); } else { if (randomBoolean()) { timeZone = null; } else { - timeZone = randomValueOtherThan(timeZone, () -> randomDateTimeZone().getID()); + timeZone = randomValueOtherThan(timeZone, () -> randomZone().getId()); } } break; @@ -848,7 +849,7 @@ public void testTimezone() throws Exception { QueryBuilder queryBuilder = parseQuery(queryAsString); assertThat(queryBuilder, instanceOf(QueryStringQueryBuilder.class)); QueryStringQueryBuilder queryStringQueryBuilder = (QueryStringQueryBuilder) queryBuilder; - assertThat(queryStringQueryBuilder.timeZone(), equalTo(DateTimeZone.forID("Europe/Paris"))); + assertThat(queryStringQueryBuilder.timeZone(), equalTo(ZoneId.of("Europe/Paris"))); String invalidQueryAsString = "{\n" + " \"query_string\":{\n" + @@ -856,7 +857,7 @@ public void testTimezone() throws Exception { " \"query\":\"" + DATE_FIELD_NAME + ":[2012 TO 2014]\"\n" + " }\n" + "}"; - expectThrows(IllegalArgumentException.class, () -> parseQuery(invalidQueryAsString)); + expectThrows(DateTimeException.class, () -> parseQuery(invalidQueryAsString)); } public void testToQueryBooleanQueryMultipleBoosts() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java index df312ba84c309..52f2c89d645f9 100644 --- a/server/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/RangeQueryBuilderTests.java @@ -44,10 +44,12 @@ import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.AbstractQueryTestCase; import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; import org.joda.time.chrono.ISOChronology; import java.io.IOException; +import java.time.Instant; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; import java.util.HashMap; import java.util.Map; @@ -72,18 +74,22 @@ protected RangeQueryBuilder doCreateTestQueryBuilder() { break; case 1: // use mapped date field, using date string representation + Instant now = Instant.now(); + ZonedDateTime start = now.minusMillis(randomIntBetween(0, 1000000)).atZone(ZoneOffset.UTC); + ZonedDateTime end = now.plusMillis(randomIntBetween(0, 1000000)).atZone(ZoneOffset.UTC); query = new RangeQueryBuilder(randomFrom( DATE_FIELD_NAME, DATE_RANGE_FIELD_NAME, DATE_ALIAS_FIELD_NAME)); - query.from(new DateTime(System.currentTimeMillis() - randomIntBetween(0, 1000000), DateTimeZone.UTC).toString()); - query.to(new DateTime(System.currentTimeMillis() + randomIntBetween(0, 1000000), DateTimeZone.UTC).toString()); + query.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(start)); + query.to(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(end)); // Create timestamp option only then we have a date mapper, // otherwise we could trigger exception. 
if (createShardContext().getMapperService().fullName(DATE_FIELD_NAME) != null) { if (randomBoolean()) { - query.timeZone(randomDateTimeZone().getID()); + query.timeZone(randomZone().getId()); } if (randomBoolean()) { - query.format("yyyy-MM-dd'T'HH:mm:ss.SSSZZ"); + String format = "strict_date_optional_time"; + query.format(format); } } break; @@ -444,7 +450,7 @@ protected MappedFieldType.Relation getRelation(QueryRewriteContext queryRewriteC DateTime queryToValue = new DateTime(2016, 1, 1, 0, 0, 0, ISOChronology.getInstanceUTC()); query.from(queryFromValue); query.to(queryToValue); - query.timeZone(randomDateTimeZone().getID()); + query.timeZone(randomZone().getId()); query.format("yyyy-MM-dd"); QueryShardContext queryShardContext = createShardContext(); QueryBuilder rewritten = query.rewrite(queryShardContext); diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java b/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java index 6d1db852a85a4..7e13b38fd3d25 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesRequestCacheIT.java @@ -26,6 +26,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.index.cache.request.RequestCacheStats; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; @@ -34,8 +35,8 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.elasticsearch.test.junit.annotations.TestLogging; -import org.joda.time.DateTimeZone; +import java.time.ZoneId; import java.time.ZoneOffset; import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; @@ -68,7 +69,7 @@ public void testCacheAggs() throws Exception { // which used to not work well with the query cache because of the handles stream output // see #9500 final SearchResponse r1 = client.prepareSearch("index").setSize(0).setSearchType(SearchType.QUERY_THEN_FETCH) - .addAggregation(dateHistogram("histo").field("f").timeZone(DateTimeZone.forID("+01:00")).minDocCount(0) + .addAggregation(dateHistogram("histo").field("f").timeZone(ZoneId.of("+01:00")).minDocCount(0) .dateHistogramInterval(DateHistogramInterval.MONTH)) .get(); assertSearchResponse(r1); @@ -80,7 +81,7 @@ public void testCacheAggs() throws Exception { for (int i = 0; i < 10; ++i) { final SearchResponse r2 = client.prepareSearch("index").setSize(0) .setSearchType(SearchType.QUERY_THEN_FETCH).addAggregation(dateHistogram("histo").field("f") - .timeZone(DateTimeZone.forID("+01:00")).minDocCount(0).dateHistogramInterval(DateHistogramInterval.MONTH)) + .timeZone(ZoneId.of("+01:00")).minDocCount(0).dateHistogramInterval(DateHistogramInterval.MONTH)) .get(); assertSearchResponse(r2); Histogram h1 = r1.getAggregations().get("histo"); @@ -246,15 +247,16 @@ public void testQueryRewriteDatesWithNow() throws Exception { assertAcked(client.admin().indices().prepareCreate("index-3").addMapping("type", "d", "type=date") .setSettings(settings).get()); ZonedDateTime now = ZonedDateTime.now(ZoneOffset.UTC); - indexRandom(true, client.prepareIndex("index-1", "type", "1").setSource("d", now), - client.prepareIndex("index-1", "type", "2").setSource("d", now.minusDays(1)), - client.prepareIndex("index-1", 
"type", "3").setSource("d", now.minusDays(2)), - client.prepareIndex("index-2", "type", "4").setSource("d", now.minusDays(3)), - client.prepareIndex("index-2", "type", "5").setSource("d", now.minusDays(4)), - client.prepareIndex("index-2", "type", "6").setSource("d", now.minusDays(5)), - client.prepareIndex("index-3", "type", "7").setSource("d", now.minusDays(6)), - client.prepareIndex("index-3", "type", "8").setSource("d", now.minusDays(7)), - client.prepareIndex("index-3", "type", "9").setSource("d", now.minusDays(8))); + DateFormatter formatter = DateFormatter.forPattern("strict_date_optional_time"); + indexRandom(true, client.prepareIndex("index-1", "type", "1").setSource("d", formatter.format(now)), + client.prepareIndex("index-1", "type", "2").setSource("d", formatter.format(now.minusDays(1))), + client.prepareIndex("index-1", "type", "3").setSource("d", formatter.format(now.minusDays(2))), + client.prepareIndex("index-2", "type", "4").setSource("d", formatter.format(now.minusDays(3))), + client.prepareIndex("index-2", "type", "5").setSource("d", formatter.format(now.minusDays(4))), + client.prepareIndex("index-2", "type", "6").setSource("d", formatter.format(now.minusDays(5))), + client.prepareIndex("index-3", "type", "7").setSource("d", formatter.format(now.minusDays(6))), + client.prepareIndex("index-3", "type", "8").setSource("d", formatter.format(now.minusDays(7))), + client.prepareIndex("index-3", "type", "9").setSource("d", formatter.format(now.minusDays(8)))); ensureSearchable("index-1", "index-2", "index-3"); assertCacheState(client, "index-1", 0, 0); assertCacheState(client, "index-2", 0, 0); diff --git a/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java b/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java index 81f5c7982d4d8..4486d4ff83ffb 100644 --- a/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java +++ b/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java @@ -29,8 +29,8 @@ import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.test.ESTestCase; -import org.joda.time.DateTimeZone; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.List; @@ -60,15 +60,15 @@ public void testSerialization() throws Exception { assertEquals(DocValueFormat.Decimal.class, vf.getClass()); assertEquals("###.##", ((DocValueFormat.Decimal) vf).pattern); - DocValueFormat.DateTime dateFormat = - new DocValueFormat.DateTime(DateFormatter.forPattern("epoch_second"), DateTimeZone.forOffsetHours(1)); + DateFormatter formatter = DateFormatter.forPattern("epoch_second"); + DocValueFormat.DateTime dateFormat = new DocValueFormat.DateTime(formatter, ZoneOffset.ofHours(1)); out = new BytesStreamOutput(); out.writeNamedWriteable(dateFormat); in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), registry); vf = in.readNamedWriteable(DocValueFormat.class); assertEquals(DocValueFormat.DateTime.class, vf.getClass()); assertEquals("epoch_second", ((DocValueFormat.DateTime) vf).formatter.pattern()); - assertEquals(DateTimeZone.forOffsetHours(1), ((DocValueFormat.DateTime) vf).timeZone); + assertEquals(ZoneOffset.ofHours(1), ((DocValueFormat.DateTime) vf).timeZone); out = new BytesStreamOutput(); out.writeNamedWriteable(DocValueFormat.GEOHASH); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/AutoDateHistogramTests.java 
b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/AutoDateHistogramTests.java index 3a10edf183376..a54f30ffac0d1 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/AutoDateHistogramTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/AutoDateHistogramTests.java @@ -36,7 +36,7 @@ protected AutoDateHistogramAggregationBuilder createTestAggregatorBuilder() { builder.missing(randomIntBetween(0, 10)); } if (randomBoolean()) { - builder.timeZone(randomDateTimeZone()); + builder.timeZone(randomZone()); } return builder; } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java index f65f2bde9662a..c59be546acd1a 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramIT.java @@ -22,12 +22,12 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.joda.Joda; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.time.DateMathParser; -import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.query.MatchNoneQueryBuilder; @@ -46,13 +46,14 @@ import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.Matchers; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; -import org.joda.time.format.DateTimeFormat; import org.junit.After; import java.io.IOException; +import java.time.Instant; import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.time.temporal.ChronoUnit; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -83,21 +84,21 @@ @ESIntegTestCase.SuiteScopeTestCase public class DateHistogramIT extends ESIntegTestCase { - static Map<DateTime, Map<String, Object>> expectedMultiSortBuckets; + static Map<ZonedDateTime, Map<String, Object>> expectedMultiSortBuckets; - private DateTime date(int month, int day) { - return new DateTime(2012, month, day, 0, 0, DateTimeZone.UTC); + private ZonedDateTime date(int month, int day) { + return ZonedDateTime.of(2012, month, day, 0, 0, 0, 0, ZoneOffset.UTC); } - private DateTime date(String date) { - return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda(date); + private ZonedDateTime date(String date) { + return DateFormatters.toZonedDateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date)); } - private static String format(DateTime date, String pattern) { - return DateTimeFormat.forPattern(pattern).print(date); + private static String format(ZonedDateTime date, String pattern) { + return DateFormatter.forPattern(pattern).format(date); } - private IndexRequestBuilder indexDoc(String idx, DateTime date, int value) throws Exception { + private IndexRequestBuilder indexDoc(String idx, ZonedDateTime date, int value) throws Exception { return client().prepareIndex(idx, "type").setSource(jsonBuilder() .startObject()
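The date(int, int) rewrite is the recurring mechanical pattern in this file: Joda's five-argument constructor becomes ZonedDateTime.of with second and nanosecond spelled out. In isolation:

    import java.time.ZoneOffset;
    import java.time.ZonedDateTime;

    public class DateHelperMigration {
        // Joda: new DateTime(2012, month, day, 0, 0, DateTimeZone.UTC)
        // java.time spells out seconds and nanos explicitly
        static ZonedDateTime date(int month, int day) {
            return ZonedDateTime.of(2012, month, day, 0, 0, 0, 0, ZoneOffset.UTC);
        }

        public static void main(String[] args) {
            ZonedDateTime jan1 = date(1, 1);
            System.out.println(jan1);                             // 2012-01-01T00:00Z
            System.out.println(jan1.toInstant().toEpochMilli());  // same instant the Joda DateTime held
        }
    }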
.timeField("date", date) @@ -142,7 +143,7 @@ public void setupSuiteScopeCluster() throws Exception { ensureSearchable(); } - private void addExpectedBucket(DateTime key, long docCount, double avg, double sum) { + private void addExpectedBucket(ZonedDateTime key, long docCount, double avg, double sum) { Map bucketProps = new HashMap<>(); bucketProps.put("_count", docCount); bucketProps.put("avg_l", avg); @@ -196,13 +197,12 @@ public void afterEachTest() throws IOException { internalCluster().wipeIndices("idx2"); } - private static String getBucketKeyAsString(DateTime key) { - return getBucketKeyAsString(key, DateTimeZone.UTC); + private static String getBucketKeyAsString(ZonedDateTime key) { + return getBucketKeyAsString(key, ZoneOffset.UTC); } - private static String getBucketKeyAsString(DateTime key, DateTimeZone tz) { - ZoneId zoneId = DateUtils.dateTimeZoneToZoneId(tz); - return DateFormatter.forPattern(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.pattern()).withZone(zoneId).formatJoda(key); + private static String getBucketKeyAsString(ZonedDateTime key, ZoneId tz) { + return DateFormatter.forPattern("strict_date_optional_time").withZone(tz).format(key); } public void testSingleValuedField() throws Exception { @@ -218,35 +218,34 @@ public void testSingleValuedField() throws Exception { List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(3)); - DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); - key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(2L)); - key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); } public void testSingleValuedFieldWithTimeZone() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo").field("date") - .dateHistogramInterval(DateHistogramInterval.DAY) - .minDocCount(1) - .timeZone(DateTimeZone.forID("+01:00"))).get(); - DateTimeZone tz = DateTimeZone.forID("+01:00"); + .addAggregation(dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.DAY).minDocCount(1) + .timeZone(ZoneId.of("+01:00"))).execute() + .actionGet(); + ZoneId tz = ZoneId.of("+01:00"); assertSearchResponse(response); Histogram histo = response.getAggregations().get("histo"); @@ -255,46 +254,46 @@ public void testSingleValuedFieldWithTimeZone() throws Exception { List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(6)); - DateTime key = new 
DateTime(2012, 1, 1, 23, 0, DateTimeZone.UTC); + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 23, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); - key = new DateTime(2012, 2, 1, 23, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 2, 1, 23, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); - key = new DateTime(2012, 2, 14, 23, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 2, 14, 23, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); - key = new DateTime(2012, 3, 1, 23, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 3, 1, 23, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(3); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); - key = new DateTime(2012, 3, 14, 23, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 3, 14, 23, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(4); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); - key = new DateTime(2012, 3, 22, 23, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 3, 22, 23, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(5); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); } @@ -304,7 +303,7 @@ public void testSingleValued_timeZone_epoch() throws Exception { if (randomBoolean()) { format = format + "||date_optional_time"; } - DateTimeZone tz = DateTimeZone.forID("+01:00"); + ZoneId tz = ZoneId.of("+01:00"); SearchResponse response = client().prepareSearch("idx") .addAggregation(dateHistogram("histo").field("date") .dateHistogramInterval(DateHistogramInterval.DAY).minDocCount(1) @@ -318,21 +317,25 @@ public void testSingleValued_timeZone_epoch() throws Exception { List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(6)); - List expectedKeys = new ArrayList<>(); - expectedKeys.add(new DateTime(2012, 1, 1, 23, 0, DateTimeZone.UTC)); - expectedKeys.add(new DateTime(2012, 2, 1, 23, 0, DateTimeZone.UTC)); - expectedKeys.add(new DateTime(2012, 2, 14, 23, 0, DateTimeZone.UTC)); - expectedKeys.add(new DateTime(2012, 3, 1, 23, 0, DateTimeZone.UTC)); - expectedKeys.add(new 
DateTime(2012, 3, 14, 23, 0, DateTimeZone.UTC)); - expectedKeys.add(new DateTime(2012, 3, 22, 23, 0, DateTimeZone.UTC)); - + List expectedKeys = new ArrayList<>(); + expectedKeys.add(ZonedDateTime.of(2012, 1, 1, 23, 0, 0, 0, ZoneOffset.UTC)); + expectedKeys.add(ZonedDateTime.of(2012, 2, 1, 23, 0, 0, 0, ZoneOffset.UTC)); + expectedKeys.add(ZonedDateTime.of(2012, 2, 14, 23, 0, 0, 0, ZoneOffset.UTC)); + expectedKeys.add(ZonedDateTime.of(2012, 3, 1, 23, 0, 0, 0, ZoneOffset.UTC)); + expectedKeys.add(ZonedDateTime.of(2012, 3, 14, 23, 0, 0, 0, ZoneOffset.UTC)); + expectedKeys.add(ZonedDateTime.of(2012, 3, 22, 23, 0, 0, 0, ZoneOffset.UTC)); - Iterator keyIterator = expectedKeys.iterator(); + Iterator keyIterator = expectedKeys.iterator(); for (Histogram.Bucket bucket : buckets) { assertThat(bucket, notNullValue()); - DateTime expectedKey = keyIterator.next(); - assertThat(bucket.getKeyAsString(), equalTo(Long.toString(expectedKey.getMillis() / millisDivider))); - assertThat(((DateTime) bucket.getKey()), equalTo(expectedKey)); + ZonedDateTime expectedKey = keyIterator.next(); + String bucketKey = bucket.getKeyAsString(); + String expectedBucketName = Long.toString(expectedKey.toInstant().toEpochMilli() / millisDivider); + if (JavaVersion.current().getVersion().get(0) == 8 && bucket.getKeyAsString().endsWith(".0")) { + expectedBucketName = expectedBucketName + ".0"; + } + assertThat(bucketKey, equalTo(expectedBucketName)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(expectedKey)); assertThat(bucket.getDocCount(), equalTo(1L)); } } @@ -355,7 +358,7 @@ public void testSingleValuedFieldOrderedByKeyAsc() throws Exception { int i = 0; for (Histogram.Bucket bucket : buckets) { - assertThat(((DateTime) bucket.getKey()), equalTo(new DateTime(2012, i + 1, 1, 0, 0, DateTimeZone.UTC))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); i++; } } @@ -377,7 +380,7 @@ public void testSingleValuedFieldOrderedByKeyDesc() throws Exception { int i = 2; for (Histogram.Bucket bucket : histo.getBuckets()) { - assertThat(((DateTime) bucket.getKey()), equalTo(new DateTime(2012, i + 1, 1, 0, 0, DateTimeZone.UTC))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); i--; } } @@ -399,7 +402,7 @@ public void testSingleValuedFieldOrderedByCountAsc() throws Exception { int i = 0; for (Histogram.Bucket bucket : histo.getBuckets()) { - assertThat(((DateTime) bucket.getKey()), equalTo(new DateTime(2012, i + 1, 1, 0, 0, DateTimeZone.UTC))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); i++; } } @@ -421,7 +424,7 @@ public void testSingleValuedFieldOrderedByCountDesc() throws Exception { int i = 2; for (Histogram.Bucket bucket : histo.getBuckets()) { - assertThat(((DateTime) bucket.getKey()), equalTo(new DateTime(2012, i + 1, 1, 0, 0, DateTimeZone.UTC))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); i--; } } @@ -444,42 +447,42 @@ public void testSingleValuedFieldWithSubAggregation() throws Exception { Object[] propertiesDocCounts = (Object[]) ((InternalAggregation)histo).getProperty("_count"); Object[] propertiesCounts = (Object[]) ((InternalAggregation)histo).getProperty("sum.value"); - DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); 
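The epoch assertions now derive the expected bucket name from toInstant().toEpochMilli() instead of Joda's getMillis(), with an extra branch tolerating a JDK 8 formatter quirk that appends ".0". The arithmetic side, standalone (millisDivider is 1000 for epoch_second and 1 for epoch_millis):

    import java.time.ZoneOffset;
    import java.time.ZonedDateTime;

    public class EpochBucketKeys {
        public static void main(String[] args) {
            ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 23, 0, 0, 0, ZoneOffset.UTC);
            long millis = key.toInstant().toEpochMilli();
            long millisDivider = 1000L; // epoch_second; use 1L for epoch_millis
            // Joda equivalent was expectedKey.getMillis() / millisDivider
            System.out.println(millis / millisDivider); // 1325458800
        }
    }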
Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); Sum sum = bucket.getAggregations().get("sum"); assertThat(sum, notNullValue()); assertThat(sum.getValue(), equalTo(1.0)); - assertThat((DateTime) propertiesKeys[0], equalTo(key)); + assertThat((ZonedDateTime) propertiesKeys[0], equalTo(key)); assertThat((long) propertiesDocCounts[0], equalTo(1L)); assertThat((double) propertiesCounts[0], equalTo(1.0)); - key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(2L)); sum = bucket.getAggregations().get("sum"); assertThat(sum, notNullValue()); assertThat(sum.getValue(), equalTo(5.0)); - assertThat((DateTime) propertiesKeys[1], equalTo(key)); + assertThat((ZonedDateTime) propertiesKeys[1], equalTo(key)); assertThat((long) propertiesDocCounts[1], equalTo(2L)); assertThat((double) propertiesCounts[1], equalTo(5.0)); - key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); sum = bucket.getAggregations().get("sum"); assertThat(sum, notNullValue()); assertThat(sum.getValue(), equalTo(15.0)); - assertThat((DateTime) propertiesKeys[2], equalTo(key)); + assertThat((ZonedDateTime) propertiesKeys[2], equalTo(key)); assertThat((long) propertiesDocCounts[2], equalTo(3L)); assertThat((double) propertiesCounts[2], equalTo(15.0)); } @@ -502,7 +505,7 @@ public void testSingleValuedFieldOrderedBySubAggregationAsc() throws Exception { int i = 0; for (Histogram.Bucket bucket : histo.getBuckets()) { - assertThat(((DateTime) bucket.getKey()), equalTo(new DateTime(2012, i + 1, 1, 0, 0, DateTimeZone.UTC))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); i++; } } @@ -525,7 +528,7 @@ public void testSingleValuedFieldOrderedBySubAggregationDesc() throws Exception int i = 2; for (Histogram.Bucket bucket : histo.getBuckets()) { - assertThat(((DateTime) bucket.getKey()), equalTo(new DateTime(2012, i + 1, 1, 0, 0, DateTimeZone.UTC))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); i--; } } @@ -548,7 +551,7 @@ public void testSingleValuedFieldOrderedByMultiValuedSubAggregationDesc() throws int i = 2; for (Histogram.Bucket bucket : histo.getBuckets()) { - assertThat(((DateTime) bucket.getKey()), equalTo(new DateTime(2012, i + 1, 1, 0, 0, DateTimeZone.UTC))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(ZonedDateTime.of(2012, i + 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))); i--; } } @@ -625,25 +628,25 @@ public void testSingleValuedFieldWithValueScript() throws Exception { List buckets = 
histo.getBuckets(); assertThat(buckets.size(), equalTo(3)); - DateTime key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + ZonedDateTime key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); - key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(2L)); - key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 4, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); } @@ -669,32 +672,32 @@ public void testMultiValuedField() throws Exception { List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(4)); - DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); - key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); - key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(5L)); - key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 4, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(3); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); } @@ -763,32 +766,32 @@ public void testMultiValuedFieldWithValueScript() throws Exception { List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(4)); - DateTime key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + ZonedDateTime key = ZonedDateTime.of(2012, 
2, 1, 0, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); - key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); - key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 4, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(5L)); - key = new DateTime(2012, 5, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 5, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(3); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); } @@ -817,25 +820,25 @@ public void testScriptSingleValue() throws Exception { List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(3)); - DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); - key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(2L)); - key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); } @@ -856,32 +859,32 @@ public void testScriptMultiValued() throws Exception { List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(4)); - DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), 
equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); - key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); - key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(5L)); - key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 4, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(3); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); } @@ -922,25 +925,25 @@ public void testPartiallyUnmapped() throws Exception { List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(3)); - DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); - key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(2L)); - key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); } @@ -971,7 +974,7 @@ public void testEmptyAggregation() throws Exception { public void testSingleValueWithTimeZone() throws Exception { prepareCreate("idx2").addMapping("type", "date", "type=date").get(); IndexRequestBuilder[] reqs = new IndexRequestBuilder[5]; - DateTime date = date("2014-03-11T00:00:00+00:00"); + ZonedDateTime date = date("2014-03-11T00:00:00+00:00"); for (int i = 0; i < reqs.length; i++) { reqs[i] = client().prepareIndex("idx2", "type", "" + i) .setSource(jsonBuilder().startObject().timeField("date", date).endObject()); @@ -983,9 +986,9 @@ public void 
testSingleValueWithTimeZone() throws Exception { .setQuery(matchAllQuery()) .addAggregation(dateHistogram("date_histo") .field("date") - .timeZone(DateTimeZone.forID("-02:00")) + .timeZone(ZoneId.of("-02:00")) .dateHistogramInterval(DateHistogramInterval.DAY) - .format("yyyy-MM-dd:HH-mm-ssZZ")) + .format("yyyy-MM-dd:HH-mm-ssZZZZZ")) .get(); assertThat(response.getHits().getTotalHits().value, equalTo(5L)); @@ -1010,8 +1013,9 @@ public void testSingleValueFieldWithExtendedBounds() throws Exception { // we're testing on days, so the base must be rounded to a day int interval = randomIntBetween(1, 2); // in days long intervalMillis = interval * 24 * 60 * 60 * 1000; - DateTime base = new DateTime(DateTimeZone.UTC).dayOfMonth().roundFloorCopy(); - DateTime baseKey = new DateTime(intervalMillis * (base.getMillis() / intervalMillis), DateTimeZone.UTC); + ZonedDateTime base = ZonedDateTime.now(ZoneOffset.UTC).withDayOfMonth(1); + ZonedDateTime baseKey = Instant.ofEpochMilli(intervalMillis * (base.toInstant().toEpochMilli() / intervalMillis)) + .atZone(ZoneOffset.UTC); prepareCreate("idx2") .setSettings( @@ -1028,7 +1032,7 @@ public void testSingleValueFieldWithExtendedBounds() throws Exception { } else { int docCount = randomIntBetween(1, 3); for (int j = 0; j < docCount; j++) { - DateTime date = baseKey.plusDays(i * interval + randomIntBetween(0, interval - 1)); + ZonedDateTime date = baseKey.plusDays(i * interval + randomIntBetween(0, interval - 1)); builders.add(indexDoc("idx2", date, j)); } docCounts[i] = docCount; @@ -1037,19 +1041,19 @@ public void testSingleValueFieldWithExtendedBounds() throws Exception { indexRandom(true, builders); ensureSearchable("idx2"); - DateTime lastDataBucketKey = baseKey.plusDays((numOfBuckets - 1) * interval); + ZonedDateTime lastDataBucketKey = baseKey.plusDays((numOfBuckets - 1) * interval); // randomizing the number of buckets on the min bound // (can sometimes fall within the data range, but more frequently will fall before the data range) int addedBucketsLeft = randomIntBetween(0, numOfBuckets); - DateTime boundsMinKey; + ZonedDateTime boundsMinKey; if (frequently()) { boundsMinKey = baseKey.minusDays(addedBucketsLeft * interval); } else { boundsMinKey = baseKey.plusDays(addedBucketsLeft * interval); addedBucketsLeft = 0; } - DateTime boundsMin = boundsMinKey.plusDays(randomIntBetween(0, interval - 1)); + ZonedDateTime boundsMin = boundsMinKey.plusDays(randomIntBetween(0, interval - 1)); // randomizing the number of buckets on the max bound // (can sometimes fall within the data range, but more frequently will fall after the data range) @@ -1059,8 +1063,8 @@ public void testSingleValueFieldWithExtendedBounds() throws Exception { addedBucketsRight = 0; boundsMaxKeyDelta = -boundsMaxKeyDelta; } - DateTime boundsMaxKey = lastDataBucketKey.plusDays(boundsMaxKeyDelta); - DateTime boundsMax = boundsMaxKey.plusDays(randomIntBetween(0, interval - 1)); + ZonedDateTime boundsMaxKey = lastDataBucketKey.plusDays(boundsMaxKeyDelta); + ZonedDateTime boundsMax = boundsMaxKey.plusDays(randomIntBetween(0, interval - 1)); // it could be that the random bounds.min we chose ended up greater than // bounds.max - this should @@ -1105,11 +1109,11 @@ public void testSingleValueFieldWithExtendedBounds() throws Exception { List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(bucketsCount)); - DateTime key = baseKey.isBefore(boundsMinKey) ? baseKey : boundsMinKey; + ZonedDateTime key = baseKey.isBefore(boundsMinKey) ? 
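The extended-bounds setup floors a timestamp onto a whole number of day-sized intervals by integer division on epoch millis, then re-wraps it as a UTC ZonedDateTime. The same rounding, standalone (note the flooring also discards base's time of day):

    import java.time.Instant;
    import java.time.ZoneOffset;
    import java.time.ZonedDateTime;

    public class IntervalFloor {
        public static void main(String[] args) {
            int intervalDays = 2;
            long intervalMillis = intervalDays * 24L * 60 * 60 * 1000;
            ZonedDateTime base = ZonedDateTime.now(ZoneOffset.UTC).withDayOfMonth(1);
            // Integer division floors the timestamp onto an interval boundary
            ZonedDateTime baseKey = Instant
                .ofEpochMilli(intervalMillis * (base.toInstant().toEpochMilli() / intervalMillis))
                .atZone(ZoneOffset.UTC);
            System.out.println(base + " -> " + baseKey);
        }
    }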
baseKey : boundsMinKey; for (int i = 0; i < bucketsCount; i++) { Histogram.Bucket bucket = buckets.get(i); assertThat(bucket, notNullValue()); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getKeyAsString(), equalTo(format(key, pattern))); assertThat(bucket.getDocCount(), equalTo(extendedValueCounts[i])); key = key.plusDays(interval); @@ -1126,15 +1130,15 @@ public void testSingleValueFieldWithExtendedBoundsTimezone() throws Exception { .setSettings(Settings.builder().put(indexSettings()).put("index.number_of_shards", 1).put("index.number_of_replicas", 0)) .get(); - DateMathParser parser = Joda.getStrictStandardDateFormatter().toDateMathParser(); + DateMathParser parser = DateFormatter.forPattern("yyyy/MM/dd HH:mm:ss||yyyy/MM/dd||epoch_millis").toDateMathParser(); // we pick a random timezone offset of +12/-12 hours and insert two documents // one at 00:00 in that time zone and one at 12:00 List builders = new ArrayList<>(); int timeZoneHourOffset = randomIntBetween(-12, 12); - DateTimeZone timezone = DateTimeZone.forOffsetHours(timeZoneHourOffset); - DateTime timeZoneStartToday = new DateTime(parser.parse("now/d", System::currentTimeMillis, false, timezone), DateTimeZone.UTC); - DateTime timeZoneNoonToday = new DateTime(parser.parse("now/d+12h", System::currentTimeMillis, false, timezone), DateTimeZone.UTC); + ZoneId timezone = ZoneOffset.ofHours(timeZoneHourOffset); + ZonedDateTime timeZoneStartToday = parser.parse("now/d", System::currentTimeMillis, false, timezone).atZone(ZoneOffset.UTC); + ZonedDateTime timeZoneNoonToday = parser.parse("now/d+12h", System::currentTimeMillis, false, timezone).atZone(ZoneOffset.UTC); builders.add(indexDoc(index, timeZoneStartToday, 1)); builders.add(indexDoc(index, timeZoneNoonToday, 2)); indexRandom(true, builders); @@ -1145,7 +1149,7 @@ public void testSingleValueFieldWithExtendedBoundsTimezone() throws Exception { response = client() .prepareSearch(index) .setQuery(QueryBuilders.rangeQuery("date") - .from("now/d").to("now/d").includeLower(true).includeUpper(true).timeZone(timezone.getID())) + .from("now/d").to("now/d").includeLower(true).includeUpper(true).timeZone(timezone.getId())) .addAggregation( dateHistogram("histo").field("date").dateHistogramInterval(DateHistogramInterval.hours(1)) .timeZone(timezone).minDocCount(0).extendedBounds(new ExtendedBounds("now/d", "now/d+23h")) @@ -1164,8 +1168,8 @@ public void testSingleValueFieldWithExtendedBoundsTimezone() throws Exception { for (int i = 0; i < buckets.size(); i++) { Histogram.Bucket bucket = buckets.get(i); assertThat(bucket, notNullValue()); - assertThat("InternalBucket " + i + " had wrong key", (DateTime) bucket.getKey(), - equalTo(new DateTime(timeZoneStartToday.getMillis() + (i * 60 * 60 * 1000), DateTimeZone.UTC))); + ZonedDateTime zonedDateTime = timeZoneStartToday.plus(i * 60 * 60 * 1000, ChronoUnit.MILLIS); + assertThat("InternalBucket " + i + " had wrong key", (ZonedDateTime) bucket.getKey(), equalTo(zonedDateTime)); if (i == 0 || i == 12) { assertThat(bucket.getDocCount(), equalTo(1L)); } else { @@ -1186,10 +1190,11 @@ public void testSingleValueFieldWithExtendedBoundsOffset() throws Exception { .get(); List builders = new ArrayList<>(); - builders.add(indexDoc(index, DateTime.parse("2016-01-03T08:00:00.000Z"), 1)); - builders.add(indexDoc(index, DateTime.parse("2016-01-03T08:00:00.000Z"), 2)); - builders.add(indexDoc(index, DateTime.parse("2016-01-06T08:00:00.000Z"), 3)); - 
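The timezone bounds test leans on Elasticsearch's DateMathParser for "now/d" and "now/d+12h" in a random fixed offset. For reference, the equivalent instants in plain java.time, with an illustrative offset standing in for the random one:

    import java.time.LocalDate;
    import java.time.ZoneId;
    import java.time.ZoneOffset;
    import java.time.ZonedDateTime;

    public class StartOfDayInZone {
        public static void main(String[] args) {
            ZoneId timezone = ZoneOffset.ofHours(-7); // stand-in for the random +/-12h offset
            // "now/d" in that zone: midnight of the current date there...
            ZonedDateTime startOfDay = LocalDate.now(timezone).atStartOfDay(timezone);
            // ...viewed as a UTC ZonedDateTime, as the test does with atZone(ZoneOffset.UTC)
            System.out.println(startOfDay.withZoneSameInstant(ZoneOffset.UTC));
            // "now/d+12h"
            System.out.println(startOfDay.plusHours(12).withZoneSameInstant(ZoneOffset.UTC));
        }
    }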
builders.add(indexDoc(index, DateTime.parse("2016-01-06T08:00:00.000Z"), 4)); + DateFormatter formatter = DateFormatter.forPattern("date_optional_time"); + builders.add(indexDoc(index, DateFormatters.toZonedDateTime(formatter.parse("2016-01-03T08:00:00.000Z")), 1)); + builders.add(indexDoc(index, DateFormatters.toZonedDateTime(formatter.parse("2016-01-03T08:00:00.000Z")), 2)); + builders.add(indexDoc(index, DateFormatters.toZonedDateTime(formatter.parse("2016-01-06T08:00:00.000Z")), 3)); + builders.add(indexDoc(index, DateFormatters.toZonedDateTime(formatter.parse("2016-01-06T08:00:00.000Z")), 4)); indexRandom(true, builders); ensureSearchable(index); @@ -1233,7 +1238,7 @@ public void testSingleValueFieldWithExtendedBoundsOffset() throws Exception { public void testSingleValueWithMultipleDateFormatsFromMapping() throws Exception { String mappingJson = Strings.toString(jsonBuilder().startObject() .startObject("type").startObject("properties") - .startObject("date").field("type", "date").field("format", "dateOptionalTime||dd-MM-yyyy") + .startObject("date").field("type", "date").field("format", "strict_date_optional_time||dd-MM-yyyy") .endObject().endObject().endObject().endObject()); prepareCreate("idx2").addMapping("type", mappingJson, XContentType.JSON).get(); IndexRequestBuilder[] reqs = new IndexRequestBuilder[5]; @@ -1256,23 +1261,23 @@ public void testSingleValueWithMultipleDateFormatsFromMapping() throws Exception List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(1)); - DateTime key = new DateTime(2014, 3, 10, 0, 0, DateTimeZone.UTC); + ZonedDateTime key = ZonedDateTime.of(2014, 3, 10, 0, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(5L)); } public void testIssue6965() { SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo").field("date").timeZone(DateTimeZone.forID("+01:00")) - .dateHistogramInterval(DateHistogramInterval.MONTH).minDocCount(0)) + .addAggregation(dateHistogram("histo").field("date").timeZone(ZoneId.of("+01:00")) + .dateHistogramInterval(DateHistogramInterval.MONTH).minDocCount(0)) .get(); assertSearchResponse(response); - DateTimeZone tz = DateTimeZone.forID("+01:00"); + ZoneId tz = ZoneId.of("+01:00"); Histogram histo = response.getAggregations().get("histo"); assertThat(histo, notNullValue()); @@ -1280,25 +1285,25 @@ public void testIssue6965() { List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(3)); - DateTime key = new DateTime(2011, 12, 31, 23, 0, DateTimeZone.UTC); + ZonedDateTime key = ZonedDateTime.of(2011, 12, 31, 23, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); - key = new DateTime(2012, 1, 31, 23, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 1, 31, 23, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + 
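The mapping above declares "strict_date_optional_time||dd-MM-yyyy", Elasticsearch's "||" fallback syntax. A rough java.time analogue of try-in-order parsing uses optional sections (an approximation only; ES's strict_date_optional_time is not exactly ISO_DATE_TIME):

    import java.time.LocalDate;
    import java.time.format.DateTimeFormatter;
    import java.time.format.DateTimeFormatterBuilder;

    public class MultiFormatParsing {
        public static void main(String[] args) {
            // Try ISO date-time, then ISO date, then the day-first fallback
            DateTimeFormatter multi = new DateTimeFormatterBuilder()
                .appendOptional(DateTimeFormatter.ISO_DATE_TIME)
                .appendOptional(DateTimeFormatter.ISO_DATE)
                .appendOptional(DateTimeFormatter.ofPattern("dd-MM-yyyy"))
                .toFormatter();
            System.out.println(LocalDate.parse("2014-03-10", multi)); // ISO branch
            System.out.println(LocalDate.parse("10-03-2014", multi)); // fallback branch
        }
    }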
assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(2L)); - key = new DateTime(2012, 2, 29, 23, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 2, 29, 23, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat(bucket.getKeyAsString(), equalTo(getBucketKeyAsString(key, tz))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); } @@ -1309,7 +1314,7 @@ public void testDSTBoundaryIssue9491() throws InterruptedException, ExecutionExc ensureSearchable("test9491"); SearchResponse response = client().prepareSearch("test9491") .addAggregation(dateHistogram("histo").field("d").dateHistogramInterval(DateHistogramInterval.YEAR) - .timeZone(DateTimeZone.forID("Asia/Jerusalem"))) + .timeZone(ZoneId.of("Asia/Jerusalem")).format("yyyy-MM-dd'T'HH:mm:ss.SSSXXXXX")) .get(); assertSearchResponse(response); Histogram histo = response.getAggregations().get("histo"); @@ -1327,8 +1332,8 @@ public void testIssue8209() throws InterruptedException, ExecutionException { ensureSearchable("test8209"); SearchResponse response = client().prepareSearch("test8209") .addAggregation(dateHistogram("histo").field("d").dateHistogramInterval(DateHistogramInterval.MONTH) - .timeZone(DateTimeZone.forID("CET")) - .minDocCount(0)) + .format("yyyy-MM-dd'T'HH:mm:ss.SSSXXXXX") + .timeZone(ZoneId.of("CET")).minDocCount(0)) .get(); assertSearchResponse(response); Histogram histo = response.getAggregations().get("histo"); @@ -1371,7 +1376,7 @@ public void testFormatIndexUnmapped() throws InterruptedException, ExecutionExce SearchResponse response = client().prepareSearch(indexDateUnmapped) .addAggregation( - dateHistogram("histo").field("dateField").dateHistogramInterval(DateHistogramInterval.MONTH).format("YYYY-MM") + dateHistogram("histo").field("dateField").dateHistogramInterval(DateHistogramInterval.MONTH).format("yyyy-MM") .minDocCount(0).extendedBounds(new ExtendedBounds("2018-01", "2018-01"))) .get(); assertSearchResponse(response); @@ -1393,15 +1398,19 @@ public void testRewriteTimeZone_EpochMillisFormat() throws InterruptedException, indexRandom(true, client().prepareIndex(index, "type").setSource("d", "1477954800000")); ensureSearchable(index); SearchResponse response = client().prepareSearch(index).addAggregation(dateHistogram("histo").field("d") - .dateHistogramInterval(DateHistogramInterval.MONTH).timeZone(DateTimeZone.forID("Europe/Berlin"))).get(); + .dateHistogramInterval(DateHistogramInterval.MONTH).timeZone(ZoneId.of("Europe/Berlin"))).get(); assertSearchResponse(response); Histogram histo = response.getAggregations().get("histo"); assertThat(histo.getBuckets().size(), equalTo(1)); - assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("1477954800000")); + if (JavaVersion.current().getVersion().get(0) == 8 && histo.getBuckets().get(0).getKeyAsString().endsWith(".0")) { + assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("1477954800000.0")); + } else { + assertThat(histo.getBuckets().get(0).getKeyAsString(), equalTo("1477954800000")); + } assertThat(histo.getBuckets().get(0).getDocCount(), equalTo(1L)); response = client().prepareSearch(index).addAggregation(dateHistogram("histo").field("d") - .dateHistogramInterval(DateHistogramInterval.MONTH).timeZone(DateTimeZone.forID("Europe/Berlin")).format("yyyy-MM-dd")) + 
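The "YYYY-MM" to "yyyy-MM" fix is not cosmetic: Joda read 'Y' as year-of-era, but in java.time patterns 'Y' is week-based year, which drifts at year boundaries. For example:

    import java.time.LocalDate;
    import java.time.format.DateTimeFormatter;

    public class WeekBasedYearPitfall {
        public static void main(String[] args) {
            LocalDate dec31 = LocalDate.of(2018, 12, 31); // a Monday, in week 1 of 2019
            // 'Y' = week-based year, 'y' = year-of-era; only the latter is usually wanted
            System.out.println(DateTimeFormatter.ofPattern("YYYY-MM").format(dec31)); // 2019-12
            System.out.println(DateTimeFormatter.ofPattern("yyyy-MM").format(dec31)); // 2018-12
        }
    }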
.dateHistogramInterval(DateHistogramInterval.MONTH).timeZone(ZoneId.of("Europe/Berlin")).format("yyyy-MM-dd")) .get(); assertSearchResponse(response); histo = response.getAggregations().get("histo"); @@ -1422,7 +1431,7 @@ public void testRewriteTimeZone_EpochMillisFormat() throws InterruptedException, public void testDSTEndTransition() throws Exception { SearchResponse response = client().prepareSearch("idx") .setQuery(new MatchNoneQueryBuilder()) - .addAggregation(dateHistogram("histo").field("date").timeZone(DateTimeZone.forID("Europe/Oslo")) + .addAggregation(dateHistogram("histo").field("date").timeZone(ZoneId.of("Europe/Oslo")) .dateHistogramInterval(DateHistogramInterval.HOUR).minDocCount(0).extendedBounds( new ExtendedBounds("2015-10-25T02:00:00.000+02:00", "2015-10-25T04:00:00.000+01:00"))) .get(); @@ -1430,9 +1439,12 @@ public void testDSTEndTransition() throws Exception { Histogram histo = response.getAggregations().get("histo"); List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(4)); - assertThat(((DateTime) buckets.get(1).getKey()).getMillis() - ((DateTime) buckets.get(0).getKey()).getMillis(), equalTo(3600000L)); - assertThat(((DateTime) buckets.get(2).getKey()).getMillis() - ((DateTime) buckets.get(1).getKey()).getMillis(), equalTo(3600000L)); - assertThat(((DateTime) buckets.get(3).getKey()).getMillis() - ((DateTime) buckets.get(2).getKey()).getMillis(), equalTo(3600000L)); + assertThat(((ZonedDateTime) buckets.get(1).getKey()).toInstant().toEpochMilli() - + ((ZonedDateTime) buckets.get(0).getKey()).toInstant().toEpochMilli(), equalTo(3600000L)); + assertThat(((ZonedDateTime) buckets.get(2).getKey()).toInstant().toEpochMilli() - + ((ZonedDateTime) buckets.get(1).getKey()).toInstant().toEpochMilli(), equalTo(3600000L)); + assertThat(((ZonedDateTime) buckets.get(3).getKey()).toInstant().toEpochMilli() - + ((ZonedDateTime) buckets.get(2).getKey()).toInstant().toEpochMilli(), equalTo(3600000L)); } /** @@ -1443,8 +1455,10 @@ public void testDontCacheScripts() throws Exception { assertAcked(prepareCreate("cache_test_idx").addMapping("type", "d", "type=date") .setSettings(Settings.builder().put("requests.cache.enable", true).put("number_of_shards", 1).put("number_of_replicas", 1)) .get()); - indexRandom(true, client().prepareIndex("cache_test_idx", "type", "1").setSource("d", date(1, 1)), - client().prepareIndex("cache_test_idx", "type", "2").setSource("d", date(2, 1))); + String date = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(date(1, 1)); + String date2 = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.format(date(2, 1)); + indexRandom(true, client().prepareIndex("cache_test_idx", "type", "1").setSource("d", date), + client().prepareIndex("cache_test_idx", "type", "2").setSource("d", date2)); // Make sure we are starting with a clear cache assertThat(client().admin().indices().prepareStats("cache_test_idx").setRequestCache(true).get().getTotal().getRequestCache() @@ -1514,7 +1528,7 @@ public void testSingleValuedFieldOrderedBySingleValueSubAggregationAscAsCompound } private void assertMultiSortResponse(int[] expectedDays, BucketOrder... order) { - DateTime[] expectedKeys = Arrays.stream(expectedDays).mapToObj(d -> date(1, d)).toArray(DateTime[]::new); + ZonedDateTime[] expectedKeys = Arrays.stream(expectedDays).mapToObj(d -> date(1, d)).toArray(ZonedDateTime[]::new); SearchResponse response = client() .prepareSearch("sort_idx") .setTypes("type") @@ -1544,7 +1558,7 @@ private void assertMultiSortResponse(int[] expectedDays, BucketOrder... 
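testDSTEndTransition now compares bucket keys through toInstant().toEpochMilli() because around the Oslo fall-back the wall clock repeats an hour, and only instant arithmetic is unambiguous. A standalone illustration:

    import java.time.ZoneId;
    import java.time.ZonedDateTime;

    public class DstEndTransition {
        public static void main(String[] args) {
            ZoneId oslo = ZoneId.of("Europe/Oslo");
            // 2015-10-25: clocks fall back from 03:00 CEST to 02:00 CET
            ZonedDateTime t = ZonedDateTime.parse("2015-10-25T02:00:00.000+02:00")
                .withZoneSameInstant(oslo);
            for (int i = 0; i < 3; i++) {
                ZonedDateTime next = t.plusHours(1);
                // Always exactly 3,600,000 ms apart, even while the wall clock repeats 02:xx
                System.out.println(t + " -> " + next + " : "
                    + (next.toInstant().toEpochMilli() - t.toInstant().toEpochMilli()) + "ms");
                t = next;
            }
        }
    }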
order) { } } - private DateTime key(Histogram.Bucket bucket) { - return (DateTime) bucket.getKey(); + private ZonedDateTime key(Histogram.Bucket bucket) { + return (ZonedDateTime) bucket.getKey(); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java index 44a9f8c2cb126..080c4faffd696 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramOffsetIT.java @@ -20,18 +20,19 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.test.ESIntegTestCase; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; import org.junit.After; import org.junit.Before; import java.io.IOException; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; import java.util.List; -import java.util.concurrent.ExecutionException; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; @@ -50,9 +51,10 @@ public class DateHistogramOffsetIT extends ESIntegTestCase { private static final String DATE_FORMAT = "yyyy-MM-dd:hh-mm-ss"; + private static final DateFormatter FORMATTER = DateFormatter.forPattern(DATE_FORMAT); - private DateTime date(String date) { - return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda(date); + private ZonedDateTime date(String date) { + return DateFormatters.toZonedDateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(date)); } @Before @@ -65,8 +67,9 @@ public void afterEachTest() throws IOException { internalCluster().wipeIndices("idx2"); } - private void prepareIndex(DateTime date, int numHours, int stepSizeHours, int idxIdStart) - throws IOException, InterruptedException, ExecutionException { + private void prepareIndex(ZonedDateTime date, int numHours, int stepSizeHours, int idxIdStart) + throws IOException, InterruptedException { + IndexRequestBuilder[] reqs = new IndexRequestBuilder[numHours]; for (int i = idxIdStart; i < idxIdStart + reqs.length; i++) { reqs[i - idxIdStart] = client().prepareIndex("idx2", "type", "" + i) @@ -94,8 +97,8 @@ public void testSingleValueWithPositiveOffset() throws Exception { List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(2)); - checkBucketFor(buckets.get(0), new DateTime(2014, 3, 10, 2, 0, DateTimeZone.UTC), 2L); - checkBucketFor(buckets.get(1), new DateTime(2014, 3, 11, 2, 0, DateTimeZone.UTC), 3L); + checkBucketFor(buckets.get(0), ZonedDateTime.of(2014, 3, 10, 2, 0, 0, 0, ZoneOffset.UTC), 2L); + checkBucketFor(buckets.get(1), ZonedDateTime.of(2014, 3, 11, 2, 0, 0, 0, ZoneOffset.UTC), 3L); } public void testSingleValueWithNegativeOffset() throws Exception { @@ -116,8 +119,8 @@ public void testSingleValueWithNegativeOffset() throws Exception { List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(2)); - checkBucketFor(buckets.get(0), new DateTime(2014, 3, 9, 22, 0, DateTimeZone.UTC), 2L); - 
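checkBucketFor can no longer use Joda's DateTime.toString(pattern); java.time temporals do not format themselves, hence the hoisted FORMATTER constant. The same move with the JDK's own formatter (the test's lowercase 'hh' is clock-hour 1-12 in both libraries):

    import java.time.ZoneOffset;
    import java.time.ZonedDateTime;
    import java.time.format.DateTimeFormatter;

    public class HoistedFormatter {
        // Joda allowed key.toString("yyyy-MM-dd:hh-mm-ss"); java.time needs a formatter object
        private static final DateTimeFormatter FORMATTER =
            DateTimeFormatter.ofPattern("yyyy-MM-dd:hh-mm-ss");

        public static void main(String[] args) {
            ZonedDateTime key = ZonedDateTime.of(2014, 3, 10, 2, 0, 0, 0, ZoneOffset.UTC);
            System.out.println(FORMATTER.format(key)); // 2014-03-10:02-00-00
        }
    }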
checkBucketFor(buckets.get(1), new DateTime(2014, 3, 10, 22, 0, DateTimeZone.UTC), 3L); + checkBucketFor(buckets.get(0), ZonedDateTime.of(2014, 3, 9, 22, 0, 0, 0, ZoneOffset.UTC), 2L); + checkBucketFor(buckets.get(1), ZonedDateTime.of(2014, 3, 10, 22, 0, 0, 0, ZoneOffset.UTC), 3L); } /** @@ -143,11 +146,11 @@ public void testSingleValueWithOffsetMinDocCount() throws Exception { List buckets = histo.getBuckets(); assertThat(buckets.size(), equalTo(5)); - checkBucketFor(buckets.get(0), new DateTime(2014, 3, 10, 6, 0, DateTimeZone.UTC), 6L); - checkBucketFor(buckets.get(1), new DateTime(2014, 3, 11, 6, 0, DateTimeZone.UTC), 6L); - checkBucketFor(buckets.get(2), new DateTime(2014, 3, 12, 6, 0, DateTimeZone.UTC), 0L); - checkBucketFor(buckets.get(3), new DateTime(2014, 3, 13, 6, 0, DateTimeZone.UTC), 6L); - checkBucketFor(buckets.get(4), new DateTime(2014, 3, 14, 6, 0, DateTimeZone.UTC), 6L); + checkBucketFor(buckets.get(0), ZonedDateTime.of(2014, 3, 10, 6, 0, 0, 0, ZoneOffset.UTC), 6L); + checkBucketFor(buckets.get(1), ZonedDateTime.of(2014, 3, 11, 6, 0, 0, 0, ZoneOffset.UTC), 6L); + checkBucketFor(buckets.get(2), ZonedDateTime.of(2014, 3, 12, 6, 0, 0, 0, ZoneOffset.UTC), 0L); + checkBucketFor(buckets.get(3), ZonedDateTime.of(2014, 3, 13, 6, 0, 0, 0, ZoneOffset.UTC), 6L); + checkBucketFor(buckets.get(4), ZonedDateTime.of(2014, 3, 14, 6, 0, 0, 0, ZoneOffset.UTC), 6L); } /** @@ -155,10 +158,10 @@ public void testSingleValueWithOffsetMinDocCount() throws Exception { * @param key the expected key * @param expectedSize the expected size of the bucket */ - private static void checkBucketFor(Histogram.Bucket bucket, DateTime key, long expectedSize) { + private static void checkBucketFor(Histogram.Bucket bucket, ZonedDateTime key, long expectedSize) { assertThat(bucket, notNullValue()); - assertThat(bucket.getKeyAsString(), equalTo(key.toString(DATE_FORMAT))); - assertThat(((DateTime) bucket.getKey()), equalTo(key)); + assertThat(bucket.getKeyAsString(), equalTo(FORMATTER.format(key))); + assertThat(((ZonedDateTime) bucket.getKey()), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(expectedSize)); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java index 91834de935b4f..f50c0bfd072b1 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeIT.java @@ -18,9 +18,11 @@ */ package org.elasticsearch.search.aggregations.bucket; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.script.Script; @@ -33,9 +35,10 @@ import org.elasticsearch.search.aggregations.metrics.Sum; import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.Matchers; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -70,12 +73,12 @@ private static IndexRequestBuilder indexDoc(int month, int day, int value) throw .endObject()); } - private static 
DateTime date(int month, int day) { - return date(month, day, DateTimeZone.UTC); + private static ZonedDateTime date(int month, int day) { + return date(month, day, ZoneOffset.UTC); } - private static DateTime date(int month, int day, DateTimeZone timezone) { - return new DateTime(2012, month, day, 0, 0, timezone); + private static ZonedDateTime date(int month, int day, ZoneId timezone) { + return ZonedDateTime.of(2012, month, day, 0, 0, 0, 0, timezone); } private static int numDocs; @@ -128,7 +131,7 @@ public void testDateMath() throws Exception { .prepareSearch("idx") .addAggregation( rangeBuilder.addUnboundedTo("a long time ago", "now-50y").addRange("recently", "now-50y", "now-1y") - .addUnboundedFrom("last year", "now-1y").timeZone(DateTimeZone.forID("EST"))).get(); + .addUnboundedFrom("last year", "now-1y").timeZone(ZoneId.of("Etc/GMT+5"))).get(); assertSearchResponse(response); @@ -176,8 +179,8 @@ public void testSingleValueField() throws Exception { Range.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), nullValue()); - assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); assertThat(bucket.getFromAsString(), nullValue()); assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -185,8 +188,8 @@ public void testSingleValueField() throws Exception { bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -194,8 +197,8 @@ public void testSingleValueField() throws Exception { bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((DateTime) bucket.getTo()), nullValue()); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), nullValue()); assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); @@ -222,8 +225,8 @@ public void testSingleValueFieldWithStringDates() throws Exception { Range.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), nullValue()); - assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); assertThat(bucket.getFromAsString(), nullValue()); assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), 
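The testDateMath zone swap replaces Joda's "EST" with "Etc/GMT+5". Mind the POSIX sign inversion in the tzdb Etc area: GMT+5 there means five hours behind UTC, a fixed offset with no DST rules, which is what the old fixed "EST" provided:

    import java.time.Instant;
    import java.time.ZoneId;

    public class EtcGmtSigns {
        public static void main(String[] args) {
            ZoneId z = ZoneId.of("Etc/GMT+5");
            // POSIX-style Etc IDs invert the sign: GMT+5 is UTC-05:00
            System.out.println(z.getRules().getOffset(Instant.now())); // -05:00
            // No DST rules either, so it is a faithful stand-in for fixed EST
            System.out.println(z.getRules().isFixedOffset()); // true
        }
    }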
equalTo(2L)); @@ -231,8 +234,8 @@ public void testSingleValueFieldWithStringDates() throws Exception { bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -240,8 +243,8 @@ public void testSingleValueFieldWithStringDates() throws Exception { bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((DateTime) bucket.getTo()), nullValue()); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), nullValue()); assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); @@ -269,8 +272,8 @@ public void testSingleValueFieldWithStringDatesWithCustomFormat() throws Excepti Range.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("*-2012-02-15")); - assertThat(((DateTime) bucket.getFrom()), nullValue()); - assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); assertThat(bucket.getFromAsString(), nullValue()); assertThat(bucket.getToAsString(), equalTo("2012-02-15")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -278,8 +281,8 @@ public void testSingleValueFieldWithStringDatesWithCustomFormat() throws Excepti bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-02-15-2012-03-15")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); assertThat(bucket.getFromAsString(), equalTo("2012-02-15")); assertThat(bucket.getToAsString(), equalTo("2012-03-15")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -287,19 +290,17 @@ public void testSingleValueFieldWithStringDatesWithCustomFormat() throws Excepti bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-03-15-*")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((DateTime) bucket.getTo()), nullValue()); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); assertThat(bucket.getFromAsString(), equalTo("2012-03-15")); assertThat(bucket.getToAsString(), nullValue()); assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); } public void testSingleValueFieldWithDateMath() throws Exception { - DateTimeZone timezone = randomDateTimeZone(); - int timeZoneOffset = timezone.getOffset(date(2, 15)); - // if 
time zone is UTC (or equivalent), time zone suffix is "Z", else something like "+03:00", which we get with the "ZZ" format - String feb15Suffix = timeZoneOffset == 0 ? "Z" : date(2,15, timezone).toString("ZZ"); - String mar15Suffix = timeZoneOffset == 0 ? "Z" : date(3,15, timezone).toString("ZZ"); + ZoneId timezone = randomZone(); + int timeZoneOffset = timezone.getRules().getOffset(date(2, 15).toInstant()).getTotalSeconds(); + // if time zone is UTC (or equivalent), time zone suffix is "Z", else something like "+03:00", which we get with the "xxx" format + String feb15Suffix = timeZoneOffset == 0 ? "Z" : date(2,15, timezone).format(DateTimeFormatter.ofPattern("xxx")); + String mar15Suffix = timeZoneOffset == 0 ? "Z" : date(3,15, timezone).format(DateTimeFormatter.ofPattern("xxx")); long expectedFirstBucketCount = timeZoneOffset < 0 ? 3L : 2L; SearchResponse response = client().prepareSearch("idx") @@ -321,29 +322,29 @@ public void testSingleValueFieldWithDateMath() throws Exception { Range.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000" + feb15Suffix)); - assertThat(((DateTime) bucket.getFrom()), nullValue()); - assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15, timezone).toDateTime(DateTimeZone.UTC))); + assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000" + feb15Suffix)); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15, timezone).withZoneSameInstant(ZoneOffset.UTC))); assertThat(bucket.getFromAsString(), nullValue()); - assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000" + feb15Suffix)); + assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000" + feb15Suffix)); assertThat(bucket.getDocCount(), equalTo(expectedFirstBucketCount)); bucket = buckets.get(1); assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000" + feb15Suffix + - "-2012-03-15T00:00:00.000" + mar15Suffix)); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15, timezone).toDateTime(DateTimeZone.UTC))); - assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15, timezone).toDateTime(DateTimeZone.UTC))); - assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000" + feb15Suffix)); - assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000" + mar15Suffix)); + assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000" + feb15Suffix + + "-2012-03-15T00:00:00.000" + mar15Suffix)); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15, timezone).withZoneSameInstant(ZoneOffset.UTC))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15, timezone).withZoneSameInstant(ZoneOffset.UTC))); + assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000" + feb15Suffix)); + assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000" + mar15Suffix)); assertThat(bucket.getDocCount(), equalTo(2L)); bucket = buckets.get(2); assertThat(bucket, notNullValue()); - assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000" + mar15Suffix + "-*")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15, timezone).toDateTime(DateTimeZone.UTC))); - assertThat(((DateTime) bucket.getTo()), nullValue()); - assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000" + mar15Suffix)); + assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000" + mar15Suffix + "-*")); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15, timezone).withZoneSameInstant(ZoneOffset.UTC))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); + assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000" + mar15Suffix)); 
assertThat(bucket.getToAsString(), nullValue()); assertThat(bucket.getDocCount(), equalTo(numDocs - 2L - expectedFirstBucketCount)); } @@ -369,8 +370,8 @@ public void testSingleValueFieldWithCustomKey() throws Exception { Range.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("r1")); - assertThat(((DateTime) bucket.getFrom()), nullValue()); - assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); assertThat(bucket.getFromAsString(), nullValue()); assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -378,8 +379,8 @@ public void testSingleValueFieldWithCustomKey() throws Exception { bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("r2")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -387,8 +388,8 @@ public void testSingleValueFieldWithCustomKey() throws Exception { bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("r3")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((DateTime) bucket.getTo()), nullValue()); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), nullValue()); assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); @@ -429,8 +430,8 @@ public void testSingleValuedFieldWithSubAggregation() throws Exception { Range.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("r1")); - assertThat(((DateTime) bucket.getFrom()), nullValue()); - assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); assertThat(bucket.getFromAsString(), nullValue()); assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -444,8 +445,8 @@ public void testSingleValuedFieldWithSubAggregation() throws Exception { bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("r2")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -459,8 +460,8 @@ public void testSingleValuedFieldWithSubAggregation() throws Exception { bucket = buckets.get(2); assertThat(bucket, 
notNullValue()); assertThat((String) bucket.getKey(), equalTo("r3")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((DateTime) bucket.getTo()), nullValue()); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), nullValue()); assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); @@ -502,8 +503,8 @@ public void testMultiValuedField() throws Exception { Range.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), nullValue()); - assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); assertThat(bucket.getFromAsString(), nullValue()); assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -511,8 +512,8 @@ public void testMultiValuedField() throws Exception { bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(3L)); @@ -520,8 +521,8 @@ public void testMultiValuedField() throws Exception { bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((DateTime) bucket.getTo()), nullValue()); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), nullValue()); assertThat(bucket.getDocCount(), equalTo(numDocs - 2L)); @@ -557,8 +558,8 @@ public void testMultiValuedFieldWithValueScript() throws Exception { Range.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), nullValue()); - assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); assertThat(bucket.getFromAsString(), nullValue()); assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(1L)); @@ -566,8 +567,8 @@ public void testMultiValuedFieldWithValueScript() throws Exception { bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 
15))); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -575,8 +576,8 @@ public void testMultiValuedFieldWithValueScript() throws Exception { bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((DateTime) bucket.getTo()), nullValue()); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), nullValue()); assertThat(bucket.getDocCount(), equalTo(numDocs - 1L)); @@ -616,8 +617,8 @@ public void testScriptSingleValue() throws Exception { Range.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), nullValue()); - assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); assertThat(bucket.getFromAsString(), nullValue()); assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -625,8 +626,8 @@ public void testScriptSingleValue() throws Exception { bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -634,8 +635,8 @@ public void testScriptSingleValue() throws Exception { bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((DateTime) bucket.getTo()), nullValue()); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), nullValue()); assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); @@ -675,8 +676,8 @@ public void testScriptMultiValued() throws Exception { Range.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), nullValue()); - assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); assertThat(bucket.getFromAsString(), nullValue()); 
assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -684,8 +685,8 @@ public void testScriptMultiValued() throws Exception { bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(3L)); @@ -693,8 +694,8 @@ public void testScriptMultiValued() throws Exception { bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((DateTime) bucket.getTo()), nullValue()); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), nullValue()); assertThat(bucket.getDocCount(), equalTo(numDocs - 2L)); @@ -723,8 +724,8 @@ public void testUnmapped() throws Exception { Range.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), nullValue()); - assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); assertThat(bucket.getFromAsString(), nullValue()); assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(0L)); @@ -732,8 +733,8 @@ public void testUnmapped() throws Exception { bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(0L)); @@ -741,8 +742,8 @@ public void testUnmapped() throws Exception { bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((DateTime) bucket.getTo()), nullValue()); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), nullValue()); assertThat(bucket.getDocCount(), equalTo(0L)); @@ -769,8 +770,8 @@ public void testUnmappedWithStringDates() throws Exception { Range.Bucket bucket = 
buckets.get(0); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), nullValue()); - assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); assertThat(bucket.getFromAsString(), nullValue()); assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(0L)); @@ -778,8 +779,8 @@ public void testUnmappedWithStringDates() throws Exception { bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(0L)); @@ -787,8 +788,8 @@ public void testUnmappedWithStringDates() throws Exception { bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15))); - assertThat(((DateTime) bucket.getTo()), nullValue()); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), nullValue()); assertThat(bucket.getDocCount(), equalTo(0L)); @@ -815,8 +816,8 @@ public void testPartiallyUnmapped() throws Exception { Range.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("*-2012-02-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), nullValue()); - assertThat(((DateTime) bucket.getTo()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), nullValue()); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(2, 15))); assertThat(bucket.getFromAsString(), nullValue()); assertThat(bucket.getToAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -824,8 +825,8 @@ public void testPartiallyUnmapped() throws Exception { bucket = buckets.get(1); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-02-15T00:00:00.000Z-2012-03-15T00:00:00.000Z")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(2, 15))); - assertThat(((DateTime) bucket.getTo()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(2, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), equalTo(date(3, 15))); assertThat(bucket.getFromAsString(), equalTo("2012-02-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getDocCount(), equalTo(2L)); @@ -833,8 +834,8 @@ public void testPartiallyUnmapped() throws Exception { bucket = buckets.get(2); assertThat(bucket, notNullValue()); assertThat((String) bucket.getKey(), equalTo("2012-03-15T00:00:00.000Z-*")); - assertThat(((DateTime) bucket.getFrom()), equalTo(date(3, 15))); - 
assertThat(((DateTime) bucket.getTo()), nullValue()); + assertThat(((ZonedDateTime) bucket.getFrom()), equalTo(date(3, 15))); + assertThat(((ZonedDateTime) bucket.getTo()), nullValue()); assertThat(bucket.getFromAsString(), equalTo("2012-03-15T00:00:00.000Z")); assertThat(bucket.getToAsString(), nullValue()); assertThat(bucket.getDocCount(), equalTo(numDocs - 4L)); @@ -859,8 +860,8 @@ public void testEmptyAggregation() throws Exception { assertThat(dateRange.getName(), equalTo("date_range")); assertThat(buckets.size(), is(1)); assertThat((String) buckets.get(0).getKey(), equalTo("0-1")); - assertThat(((DateTime) buckets.get(0).getFrom()).getMillis(), equalTo(0L)); - assertThat(((DateTime) buckets.get(0).getTo()).getMillis(), equalTo(1L)); + assertThat(((ZonedDateTime) buckets.get(0).getFrom()).toInstant().toEpochMilli(), equalTo(0L)); + assertThat(((ZonedDateTime) buckets.get(0).getTo()).toInstant().toEpochMilli(), equalTo(1L)); assertThat(buckets.get(0).getDocCount(), equalTo(0L)); assertThat(buckets.get(0).getAggregations().asList().isEmpty(), is(true)); } @@ -903,7 +904,8 @@ public void testDontCacheScripts() throws Exception { params.put("fieldname", "date"); SearchResponse r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(dateRange("foo").field("date") .script(new Script(ScriptType.INLINE, "mockscript", DateScriptMocksPlugin.DOUBLE_PLUS_ONE_MONTH, params)) - .addRange(new DateTime(2012, 1, 1, 0, 0, 0, 0, DateTimeZone.UTC), new DateTime(2013, 1, 1, 0, 0, 0, 0, DateTimeZone.UTC))) + .addRange(ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2013, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))) .get(); assertSearchResponse(r); @@ -915,7 +917,8 @@ public void testDontCacheScripts() throws Exception { // To make sure that the cache is working test that a request not using // a script is cached r = client().prepareSearch("cache_test_idx").setSize(0).addAggregation(dateRange("foo").field("date") - .addRange(new DateTime(2012, 1, 1, 0, 0, 0, 0, DateTimeZone.UTC), new DateTime(2013, 1, 1, 0, 0, 0, 0, DateTimeZone.UTC))) + .addRange(ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2013, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC))) .get(); assertSearchResponse(r); @@ -969,10 +972,9 @@ public void testRangeWithFormatStringValue() throws Exception { assertBucket(buckets.get(1), 1L, "3000000-4000000", 3000000L, 4000000L); // providing numeric input without format should throw an exception - Exception e = expectThrows(Exception.class, () -> client().prepareSearch(indexName).setSize(0) + ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> client().prepareSearch(indexName).setSize(0) .addAggregation(dateRange("date_range").field("date").addRange(1000000, 3000000).addRange(3000000, 4000000)).get()); - Throwable cause = e.getCause(); - assertThat(cause.getMessage(), + assertThat(e.getDetailedMessage(), containsString("failed to parse date field [1000000] with format [strict_hour_minute_second]")); } @@ -984,9 +986,9 @@ public void testRangeWithFormatNumericValue() throws Exception { String indexName = "dateformat_numeric_test_idx"; assertAcked(prepareCreate(indexName).addMapping("type", "date", "type=date,format=epoch_second")); indexRandom(true, - client().prepareIndex(indexName, "type", "1").setSource(jsonBuilder().startObject().field("date", 1000).endObject()), + client().prepareIndex(indexName, "type", "1").setSource(jsonBuilder().startObject().field("date", 1002).endObject()), client().prepareIndex(indexName, "type", 
"2").setSource(jsonBuilder().startObject().field("date", 2000).endObject()), - client().prepareIndex(indexName, "type", "3").setSource(jsonBuilder().startObject().field("date", 3000).endObject())); + client().prepareIndex(indexName, "type", "3").setSource(jsonBuilder().startObject().field("date", 3008).endObject())); // using no format should work when to/from is compatible with format in // mapping @@ -994,39 +996,39 @@ public void testRangeWithFormatNumericValue() throws Exception { .addAggregation(dateRange("date_range").field("date").addRange(1000, 3000).addRange(3000, 4000)).get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); List buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); - assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); - assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); + if (JavaVersion.current().getVersion().get(0) == 8) { + assertBucket(buckets.get(0), 2L, "1000.0-3000.0", 1000000L, 3000000L); + assertBucket(buckets.get(1), 1L, "3000.0-4000.0", 3000000L, 4000000L); + } else { + assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); + assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); + } // using no format should also work when and to/from are string values searchResponse = client().prepareSearch(indexName).setSize(0) .addAggregation(dateRange("date_range").field("date").addRange("1000", "3000").addRange("3000", "4000")).get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); - assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); - assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); + if (JavaVersion.current().getVersion().get(0) == 8) { + assertBucket(buckets.get(0), 2L, "1000.0-3000.0", 1000000L, 3000000L); + assertBucket(buckets.get(1), 1L, "3000.0-4000.0", 3000000L, 4000000L); + } else { + assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); + assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); + } // also e-notation should work, fractional parts should be truncated searchResponse = client().prepareSearch(indexName).setSize(0) .addAggregation(dateRange("date_range").field("date").addRange(1.0e3, 3000.8123).addRange(3000.8123, 4.0e3)).get(); assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); - assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); - assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); - - // also e-notation and floats provided as string also be truncated (see: #14641) - searchResponse = client().prepareSearch(indexName).setSize(0) - .addAggregation(dateRange("date_range").field("date").addRange("1.0e3", "3.0e3").addRange("3.0e3", "4.0e3")).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); - assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); - assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); - - searchResponse = client().prepareSearch(indexName).setSize(0) - .addAggregation(dateRange("date_range").field("date").addRange("1000.123", "3000.8").addRange("3000.8", "4000.3")).get(); - assertThat(searchResponse.getHits().getTotalHits().value, equalTo(3L)); - 
buckets = checkBuckets(searchResponse.getAggregations().get("date_range"), "date_range", 2); - assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); - assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); + if (JavaVersion.current().getVersion().get(0) == 8) { + assertBucket(buckets.get(0), 2L, "1000.0-3000.0", 1000000L, 3000000L); + assertBucket(buckets.get(1), 1L, "3000.0-4000.0", 3000000L, 4000000L); + } else { + assertBucket(buckets.get(0), 2L, "1000-3000", 1000000L, 3000000L); + assertBucket(buckets.get(1), 1L, "3000-4000", 3000000L, 4000000L); + } // using different format should work when to/from is compatible with // format in aggregation @@ -1061,8 +1063,8 @@ private static List<Range.Bucket> checkBuckets(Range dateRange, String expectedA private static void assertBucket(Bucket bucket, long bucketSize, String expectedKey, long expectedFrom, long expectedTo) { assertThat(bucket.getDocCount(), equalTo(bucketSize)); assertThat((String) bucket.getKey(), equalTo(expectedKey)); - assertThat(((DateTime) bucket.getFrom()).getMillis(), equalTo(expectedFrom)); - assertThat(((DateTime) bucket.getTo()).getMillis(), equalTo(expectedTo)); + assertThat(((ZonedDateTime) bucket.getFrom()).toInstant().toEpochMilli(), equalTo(expectedFrom)); + assertThat(((ZonedDateTime) bucket.getTo()).toInstant().toEpochMilli(), equalTo(expectedTo)); assertThat(bucket.getAggregations().asList().isEmpty(), is(true)); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeTests.java index 1c198fd3ca5d6..34164bc28967c 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateRangeTests.java @@ -65,7 +65,7 @@ protected DateRangeAggregationBuilder createTestAggregatorBuilder() { factory.missing(randomIntBetween(0, 10)); } if (randomBoolean()) { - factory.timeZone(randomDateTimeZone()); + factory.timeZone(randomZone()); } return factory; } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java index e7bf0fe4cf700..b09277aca6c6d 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/MinDocCountIT.java @@ -22,11 +22,10 @@ import com.carrotsearch.hppc.LongHashSet; import com.carrotsearch.hppc.LongSet; import com.carrotsearch.randomizedtesting.generators.RandomStrings; - import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.common.time.DateFormatters; +import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; @@ -124,7 +123,7 @@ public void setupSuiteScopeCluster() throws Exception { double doubleTerm = longTerm * Math.PI; ZonedDateTime time = ZonedDateTime.of(2014, 1, ((int) longTerm % 20) + 1, 0, 0, 0, 0, ZoneOffset.UTC); - String dateTerm = DateFormatters.forPattern("yyyy-MM-dd").format(time); + String dateTerm = DateFormatter.forPattern("yyyy-MM-dd").format(time); final int frequency = randomBoolean() ? 
1 : randomIntBetween(2, 20); for (int j = 0; j < frequency; ++j) { indexRequests.add(client().prepareIndex("idx", "type").setSource(jsonBuilder() diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilderTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilderTests.java index ac985660399d7..d31f7a89b462e 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilderTests.java @@ -42,7 +42,7 @@ private DateHistogramValuesSourceBuilder randomDateHistogramSourceBuilder() { histo.interval(randomNonNegativeLong()); } if (randomBoolean()) { - histo.timeZone(randomDateTimeZone()); + histo.timeZone(randomZone()); } if (randomBoolean()) { histo.missingBucket(true); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java index e945eeba519f4..5f219ee6be948 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregatorTests.java @@ -39,6 +39,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.IpFieldMapper; @@ -57,12 +58,12 @@ import org.elasticsearch.search.aggregations.metrics.TopHitsAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.sort.SortOrder; -import org.joda.time.DateTimeZone; import org.junit.After; import org.junit.Before; import java.io.IOException; import java.net.InetAddress; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -1155,8 +1156,7 @@ public void testThatDateHistogramFailsFormatAfter() throws IOException { }, (result) -> {} )); - assertThat(exc.getCause(), instanceOf(IllegalArgumentException.class)); - assertThat(exc.getCause().getMessage(), containsString("Parse failure")); + assertThat(exc.getMessage(), containsString("failed to parse date field [1474329600000]")); } public void testWithDateHistogramAndTimeZone() throws IOException { @@ -1176,7 +1176,7 @@ public void testWithDateHistogramAndTimeZone() throws IOException { DateHistogramValuesSourceBuilder histo = new DateHistogramValuesSourceBuilder("date") .field("date") .dateHistogramInterval(DateHistogramInterval.days(1)) - .timeZone(DateTimeZone.forOffsetHours(1)); + .timeZone(ZoneOffset.ofHours(1)); return new CompositeAggregationBuilder("name", Collections.singletonList(histo)); }, (result) -> { @@ -1196,7 +1196,7 @@ public void testWithDateHistogramAndTimeZone() throws IOException { DateHistogramValuesSourceBuilder histo = new DateHistogramValuesSourceBuilder("date") .field("date") .dateHistogramInterval(DateHistogramInterval.days(1)) - .timeZone(DateTimeZone.forOffsetHours(1)); + .timeZone(ZoneOffset.ofHours(1)); return new CompositeAggregationBuilder("name", Collections.singletonList(histo)) 
.aggregateAfter(createAfterKey("date", 1474326000000L)); @@ -1835,6 +1835,6 @@ private static Map<String, List<Object>> createDocument(Object... fields) { } private static long asLong(String dateTime) { - return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda(dateTime).getMillis(); + return DateFormatters.toZonedDateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli(); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/InternalCompositeTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/InternalCompositeTests.java index be9a760150427..35bf575d046be 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/InternalCompositeTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/composite/InternalCompositeTests.java @@ -30,10 +30,10 @@ import org.elasticsearch.search.aggregations.ParsedAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalMultiBucketAggregationTestCase; -import org.joda.time.DateTimeZone; import org.junit.After; import java.io.IOException; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; @@ -64,7 +64,7 @@ private static DocValueFormat randomDocValueFormat(boolean isLong) { if (isLong) { // we use specific format only for date histogram on a long/date field if (randomBoolean()) { - return new DocValueFormat.DateTime(DateFormatter.forPattern("epoch_second"), DateTimeZone.forOffsetHours(1)); + return new DocValueFormat.DateTime(DateFormatter.forPattern("epoch_second"), ZoneOffset.ofHours(1)); } else { return DocValueFormat.RAW; } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java index 6b4d1482adb5e..9293b33e22f43 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregatorTests.java @@ -33,6 +33,7 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.search.aggregations.AggregationBuilders; @@ -41,11 +42,13 @@ import org.elasticsearch.search.aggregations.metrics.InternalStats; import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.hamcrest.Matchers; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; import org.junit.Assert; import java.io.IOException; +import java.time.LocalDate; +import java.time.YearMonth; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -59,17 +62,17 @@ public class AutoDateHistogramAggregatorTests extends AggregatorTestCase { private static final String DATE_FIELD = "date"; private static final String INSTANT_FIELD = "instant"; - private static final List<DateTime> DATES_WITH_TIME = Arrays.asList( - new DateTime(2010, 3, 12, 1, 7, 45, DateTimeZone.UTC), - new DateTime(2010, 4, 27, 3, 43, 34, 
DateTimeZone.UTC), - new DateTime(2012, 5, 18, 4, 11, 0, DateTimeZone.UTC), - new DateTime(2013, 5, 29, 5, 11, 31, DateTimeZone.UTC), - new DateTime(2013, 10, 31, 8, 24, 5, DateTimeZone.UTC), - new DateTime(2015, 2, 13, 13, 9, 32, DateTimeZone.UTC), - new DateTime(2015, 6, 24, 13, 47, 43, DateTimeZone.UTC), - new DateTime(2015, 11, 13, 16, 14, 34, DateTimeZone.UTC), - new DateTime(2016, 3, 4, 17, 9, 50, DateTimeZone.UTC), - new DateTime(2017, 12, 12, 22, 55, 46, DateTimeZone.UTC)); + private static final List<ZonedDateTime> DATES_WITH_TIME = Arrays.asList( + ZonedDateTime.of(2010, 3, 12, 1, 7, 45, 0, ZoneOffset.UTC), + ZonedDateTime.of(2010, 4, 27, 3, 43, 34, 0, ZoneOffset.UTC), + ZonedDateTime.of(2012, 5, 18, 4, 11, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2013, 5, 29, 5, 11, 31, 0, ZoneOffset.UTC), + ZonedDateTime.of(2013, 10, 31, 8, 24, 5, 0, ZoneOffset.UTC), + ZonedDateTime.of(2015, 2, 13, 13, 9, 32, 0, ZoneOffset.UTC), + ZonedDateTime.of(2015, 6, 24, 13, 47, 43, 0, ZoneOffset.UTC), + ZonedDateTime.of(2015, 11, 13, 16, 14, 34, 0, ZoneOffset.UTC), + ZonedDateTime.of(2016, 3, 4, 17, 9, 50, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 12, 12, 22, 55, 46, 0, ZoneOffset.UTC)); private static final Query DEFAULT_QUERY = new MatchAllDocsQuery(); @@ -184,7 +187,7 @@ public void testSubAggregations() throws IOException { } public void testNoDocs() throws IOException { - final List<DateTime> dates = Collections.emptyList(); + final List<ZonedDateTime> dates = Collections.emptyList(); final Consumer<AutoDateHistogramAggregationBuilder> aggregation = agg -> agg.setNumBuckets(10).field(DATE_FIELD); testSearchCase(DEFAULT_QUERY, dates, aggregation, @@ -209,8 +212,10 @@ public void testAggregateWrongField() throws IOException { } public void testIntervalYear() throws IOException { - final long start = new DateTime(DateTimeZone.UTC).withDate(2015, 1, 1).getMillis(); - final long end = new DateTime(DateTimeZone.UTC).withDate(2017, 12, 31).getMillis(); + + + final long start = LocalDate.of(2015, 1, 1).atStartOfDay(ZoneOffset.UTC).toInstant().toEpochMilli(); + final long end = LocalDate.of(2017, 12, 31).atStartOfDay(ZoneOffset.UTC).toInstant().toEpochMilli(); final Query rangeQuery = LongPoint.newRangeQuery(INSTANT_FIELD, start, end); testSearchCase(rangeQuery, DATES_WITH_TIME, aggregation -> aggregation.setNumBuckets(4).field(DATE_FIELD), @@ -228,8 +233,8 @@ public void testIntervalYear() throws IOException { testSearchAndReduceCase(rangeQuery, DATES_WITH_TIME, aggregation -> aggregation.setNumBuckets(4).field(DATE_FIELD), histogram -> { - final DateTime startDate = new DateTime(2015, 1, 1, 0, 0, DateTimeZone.UTC); - final Map<DateTime, Integer> expectedDocCount = new HashMap<>(); + final ZonedDateTime startDate = ZonedDateTime.of(2015, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); + final Map<ZonedDateTime, Integer> expectedDocCount = new HashMap<>(); expectedDocCount.put(startDate, 3); expectedDocCount.put(startDate.plusYears(1), 1); expectedDocCount.put(startDate.plusYears(2), 1); @@ -243,13 +248,13 @@ public void testIntervalYear() throws IOException { } public void testIntervalMonth() throws IOException { - final List<DateTime> datesForMonthInterval = Arrays.asList( - new DateTime(2017, 1, 1, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 2, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 3, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 3, 4, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 3, 5, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 3, 6, 0, 0, 0, DateTimeZone.UTC)); + final List<ZonedDateTime> datesForMonthInterval = Arrays.asList( + ZonedDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 2, 0, 
0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 3, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 3, 4, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 3, 5, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 3, 6, 0, 0, 0, 0, ZoneOffset.UTC)); testSearchCase(DEFAULT_QUERY, datesForMonthInterval, aggregation -> aggregation.setNumBuckets(4).field(DATE_FIELD), histogram -> { final List<? extends Histogram.Bucket> buckets = histogram.getBuckets(); @@ -263,7 +268,7 @@ testSearchAndReduceCase(DEFAULT_QUERY, datesForMonthInterval, aggregation -> aggregation.setNumBuckets(4).field(DATE_FIELD), histogram -> { - final Map<DateTime, Integer> expectedDocCount = new HashMap<>(); + final Map<ZonedDateTime, Integer> expectedDocCount = new HashMap<>(); expectedDocCount.put(datesForMonthInterval.get(0).withDayOfMonth(1), 1); expectedDocCount.put(datesForMonthInterval.get(1).withDayOfMonth(1), 2); expectedDocCount.put(datesForMonthInterval.get(3).withDayOfMonth(1), 3); @@ -287,15 +292,15 @@ public void testWithLargeNumberOfBuckets() { } public void testIntervalDay() throws IOException { - final List<DateTime> datesForDayInterval = Arrays.asList( - new DateTime(2017, 2, 1, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 2, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 2, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 3, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 3, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 3, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 5, 0, 0, 0, DateTimeZone.UTC)); - final Map<DateTime, Integer> expectedDocCount = new HashMap<>(); + final List<ZonedDateTime> datesForDayInterval = Arrays.asList( + ZonedDateTime.of(2017, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 2, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 2, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 3, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 3, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 3, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 5, 0, 0, 0, 0, ZoneOffset.UTC)); + final Map<ZonedDateTime, Integer> expectedDocCount = new HashMap<>(); expectedDocCount.put(datesForDayInterval.get(0), 1); expectedDocCount.put(datesForDayInterval.get(1), 2); expectedDocCount.put(datesForDayInterval.get(3), 3); @@ -321,16 +326,16 @@ public void testIntervalDay() throws IOException { } public void testIntervalDayWithTZ() throws IOException { - final List<DateTime> datesForDayInterval = Arrays.asList( - new DateTime(2017, 2, 1, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 2, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 2, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 3, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 3, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 3, 0, 0, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 5, 0, 0, 0, DateTimeZone.UTC)); + final List<ZonedDateTime> datesForDayInterval = Arrays.asList( + ZonedDateTime.of(2017, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 2, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 2, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 3, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 3, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 3, 0, 0, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 5, 0, 0, 0, 0, ZoneOffset.UTC)); testSearchCase(DEFAULT_QUERY, datesForDayInterval, - aggregation -> aggregation.setNumBuckets(5).field(DATE_FIELD).timeZone(DateTimeZone.forOffsetHours(-1)), histogram -> { + aggregation -> 
aggregation.setNumBuckets(5).field(DATE_FIELD).timeZone(ZoneOffset.ofHours(-1)), histogram -> { final Map<String, Integer> expectedDocCount = new HashMap<>(); expectedDocCount.put("2017-01-31T23:00:00.000-01:00", 1); expectedDocCount.put("2017-02-01T23:00:00.000-01:00", 2); @@ -343,7 +348,7 @@ public void testIntervalDayWithTZ() throws IOException { assertTrue(AggregationInspectionHelper.hasValue(histogram)); }); testSearchAndReduceCase(DEFAULT_QUERY, datesForDayInterval, - aggregation -> aggregation.setNumBuckets(5).field(DATE_FIELD).timeZone(DateTimeZone.forOffsetHours(-1)), histogram -> { + aggregation -> aggregation.setNumBuckets(5).field(DATE_FIELD).timeZone(ZoneOffset.ofHours(-1)), histogram -> { final Map<String, Integer> expectedDocCount = new HashMap<>(); expectedDocCount.put("2017-01-31T00:00:00.000-01:00", 1); expectedDocCount.put("2017-02-01T00:00:00.000-01:00", 2); @@ -358,17 +363,17 @@ public void testIntervalDayWithTZ() throws IOException { } public void testIntervalHour() throws IOException { - final List<DateTime> datesForHourInterval = Arrays.asList( - new DateTime(2017, 2, 1, 9, 2, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 9, 35, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 10, 15, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 13, 6, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 14, 4, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 14, 5, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 15, 59, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 16, 6, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 16, 48, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 16, 59, 0, DateTimeZone.UTC)); + final List<ZonedDateTime> datesForHourInterval = Arrays.asList( + ZonedDateTime.of(2017, 2, 1, 9, 2, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 9, 35, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 10, 15, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 13, 6, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 14, 4, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 14, 5, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 15, 59, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 16, 6, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 16, 48, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 16, 59, 0, 0, ZoneOffset.UTC)); testSearchCase(DEFAULT_QUERY, datesForHourInterval, aggregation -> aggregation.setNumBuckets(8).field(DATE_FIELD), histogram -> { @@ -384,13 +389,13 @@ testSearchAndReduceCase(DEFAULT_QUERY, datesForHourInterval, aggregation -> aggregation.setNumBuckets(10).field(DATE_FIELD), histogram -> { - final Map<DateTime, Integer> expectedDocCount = new HashMap<>(); - expectedDocCount.put(datesForHourInterval.get(0).withMinuteOfHour(0), 2); - expectedDocCount.put(datesForHourInterval.get(2).withMinuteOfHour(0), 1); - expectedDocCount.put(datesForHourInterval.get(3).withMinuteOfHour(0), 1); - expectedDocCount.put(datesForHourInterval.get(4).withMinuteOfHour(0), 2); - expectedDocCount.put(datesForHourInterval.get(6).withMinuteOfHour(0), 1); - expectedDocCount.put(datesForHourInterval.get(7).withMinuteOfHour(0), 3); + final Map<ZonedDateTime, Integer> expectedDocCount = new HashMap<>(); + expectedDocCount.put(datesForHourInterval.get(0).withMinute(0), 2); + expectedDocCount.put(datesForHourInterval.get(2).withMinute(0), 1); + expectedDocCount.put(datesForHourInterval.get(3).withMinute(0), 1); + expectedDocCount.put(datesForHourInterval.get(4).withMinute(0), 2); + expectedDocCount.put(datesForHourInterval.get(6).withMinute(0), 1); + 
expectedDocCount.put(datesForHourInterval.get(7).withMinute(0), 3); final List<? extends Histogram.Bucket> buckets = histogram.getBuckets(); assertEquals(8, buckets.size()); buckets.forEach(bucket -> @@ -400,10 +405,10 @@ public void testIntervalHour() throws IOException { testSearchAndReduceCase(DEFAULT_QUERY, datesForHourInterval, aggregation -> aggregation.setNumBuckets(6).field(DATE_FIELD), histogram -> { - final Map<DateTime, Integer> expectedDocCount = new HashMap<>(); - expectedDocCount.put(datesForHourInterval.get(0).withMinuteOfHour(0), 3); - expectedDocCount.put(datesForHourInterval.get(0).plusHours(3).withMinuteOfHour(0), 3); - expectedDocCount.put(datesForHourInterval.get(0).plusHours(6).withMinuteOfHour(0), 4); + final Map<ZonedDateTime, Integer> expectedDocCount = new HashMap<>(); + expectedDocCount.put(datesForHourInterval.get(0).withMinute(0), 3); + expectedDocCount.put(datesForHourInterval.get(0).plusHours(3).withMinute(0), 3); + expectedDocCount.put(datesForHourInterval.get(0).plusHours(6).withMinute(0), 4); final List<? extends Histogram.Bucket> buckets = histogram.getBuckets(); assertEquals(expectedDocCount.size(), buckets.size()); buckets.forEach(bucket -> @@ -413,22 +418,23 @@ public void testIntervalHour() throws IOException { } public void testIntervalHourWithTZ() throws IOException { - final List<DateTime> datesForHourInterval = Arrays.asList( - new DateTime(2017, 2, 1, 9, 2, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 9, 35, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 10, 15, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 13, 6, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 14, 4, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 14, 5, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 15, 59, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 16, 6, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 16, 48, 0, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 16, 59, 0, DateTimeZone.UTC)); + final List<ZonedDateTime> datesForHourInterval = Arrays.asList( + ZonedDateTime.of(2017, 2, 1, 9, 2, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 9, 35, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 10, 15, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 13, 6, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 14, 4, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 14, 5, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 15, 59, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 16, 6, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 16, 48, 0, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 16, 59, 0, 0, ZoneOffset.UTC)); testSearchCase(DEFAULT_QUERY, datesForHourInterval, - aggregation -> aggregation.setNumBuckets(8).field(DATE_FIELD).timeZone(DateTimeZone.forOffsetHours(-1)), + aggregation -> aggregation.setNumBuckets(8).field(DATE_FIELD).timeZone(ZoneOffset.ofHours(-1)), histogram -> { final List<String> dateStrings = datesForHourInterval.stream() - .map(dateTime -> dateTime.withZone(DateTimeZone.forOffsetHours(-1)).toString()).collect(Collectors.toList()); + .map(dateTime -> DateFormatter.forPattern("strict_date_time") + .format(dateTime.withZoneSameInstant(ZoneOffset.ofHours(-1)))).collect(Collectors.toList()); final List<? extends Histogram.Bucket> buckets = histogram.getBuckets(); assertEquals(datesForHourInterval.size(), buckets.size()); for (int i = 0; i < buckets.size(); i++) { @@ -439,7 +445,7 @@ public void testIntervalHourWithTZ() throws IOException { } ); testSearchAndReduceCase(DEFAULT_QUERY, datesForHourInterval, - aggregation -> aggregation.setNumBuckets(10).field(DATE_FIELD).timeZone(DateTimeZone.forOffsetHours(-1)), + aggregation -> 
aggregation.setNumBuckets(10).field(DATE_FIELD).timeZone(ZoneOffset.ofHours(-1)), histogram -> { final Map<String, Integer> expectedDocCount = new HashMap<>(); expectedDocCount.put("2017-02-01T08:00:00.000-01:00", 2); @@ -458,10 +464,10 @@ public void testIntervalHourWithTZ() throws IOException { public void testRandomSecondIntervals() throws IOException { final int length = 120; - final List<DateTime> dataset = new ArrayList<>(length); - final DateTime startDate = new DateTime(2017, 1, 1, 0, 0, 0, DateTimeZone.UTC); + final List<ZonedDateTime> dataset = new ArrayList<>(length); + final ZonedDateTime startDate = ZonedDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); for (int i = 0; i < length; i++) { - final DateTime date = startDate.plusSeconds(i); + final ZonedDateTime date = startDate.plusSeconds(i); dataset.add(date); } final Map bucketsToExpectedDocCountMap = new HashMap<>(); @@ -487,10 +493,10 @@ public void testRandomSecondIntervals() throws IOException { public void testRandomMinuteIntervals() throws IOException { final int length = 120; - final List<DateTime> dataset = new ArrayList<>(length); - final DateTime startDate = new DateTime(2017, 1, 1, 0, 0, DateTimeZone.UTC); + final List<ZonedDateTime> dataset = new ArrayList<>(length); + final ZonedDateTime startDate = ZonedDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); for (int i = 0; i < length; i++) { - final DateTime date = startDate.plusMinutes(i); + final ZonedDateTime date = startDate.plusMinutes(i); dataset.add(date); } final Map bucketsToExpectedDocCountMap = new HashMap<>(); @@ -516,10 +522,10 @@ public void testRandomMinuteIntervals() throws IOException { public void testRandomHourIntervals() throws IOException { final int length = 72; - final List<DateTime> dataset = new ArrayList<>(length); - final DateTime startDate = new DateTime(2017, 1, 1, 0, 0, DateTimeZone.UTC); + final List<ZonedDateTime> dataset = new ArrayList<>(length); + final ZonedDateTime startDate = ZonedDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); for (int i = 0; i < length; i++) { - final DateTime date = startDate.plusHours(i); + final ZonedDateTime date = startDate.plusHours(i); dataset.add(date); } final Map bucketsToExpectedDocCountMap = new HashMap<>(); @@ -544,10 +550,10 @@ public void testRandomHourIntervals() throws IOException { public void testRandomDayIntervals() throws IOException { final int length = 140; - final List<DateTime> dataset = new ArrayList<>(length); - final DateTime startDate = new DateTime(2017, 1, 1, 0, 0, DateTimeZone.UTC); + final List<ZonedDateTime> dataset = new ArrayList<>(length); + final ZonedDateTime startDate = ZonedDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); for (int i = 0; i < length; i++) { - final DateTime date = startDate.plusDays(i); + final ZonedDateTime date = startDate.plusDays(i); dataset.add(date); } final int randomChoice = randomIntBetween(1, 3); @@ -583,17 +589,17 @@ public void testRandomDayIntervals() throws IOException { final int randomIndex = randomInt(2); final Histogram.Bucket bucket = buckets.get(randomIndex); assertEquals(startDate.plusMonths(randomIndex), bucket.getKey()); - assertEquals(startDate.plusMonths(randomIndex).dayOfMonth().getMaximumValue(), bucket.getDocCount()); + assertEquals(YearMonth.from(startDate.plusMonths(randomIndex)).lengthOfMonth(), bucket.getDocCount()); }); } } public void testRandomMonthIntervals() throws IOException { final int length = 60; - final List<DateTime> dataset = new ArrayList<>(length); - final DateTime startDate = new DateTime(2017, 1, 1, 0, 0, DateTimeZone.UTC); + final List<ZonedDateTime> dataset = new ArrayList<>(length); + final ZonedDateTime startDate = 
ZonedDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); for (int i = 0; i < length; i++) { - final DateTime date = startDate.plusMonths(i); + final ZonedDateTime date = startDate.plusMonths(i); dataset.add(date); } final Map bucketsToExpectedDocCountMap = new HashMap<>(); @@ -617,10 +623,10 @@ public void testRandomMonthIntervals() throws IOException { public void testRandomYearIntervals() throws IOException { final int length = 300; - final List dataset = new ArrayList<>(length); - final DateTime startDate = new DateTime(2017, 1, 1, 0, 0, DateTimeZone.UTC); + final List dataset = new ArrayList<>(length); + final ZonedDateTime startDate = ZonedDateTime.of(2017, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); for (int i = 0; i < length; i++) { - final DateTime date = startDate.plusYears(i); + final ZonedDateTime date = startDate.plusYears(i); dataset.add(date); } final Map bucketsToExpectedDocCountMap = new HashMap<>(); @@ -646,12 +652,12 @@ public void testRandomYearIntervals() throws IOException { } public void testIntervalMinute() throws IOException { - final List datesForMinuteInterval = Arrays.asList( - new DateTime(2017, 2, 1, 9, 2, 35, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 9, 2, 59, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 9, 15, 37, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 9, 16, 4, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 9, 16, 42, DateTimeZone.UTC)); + final List datesForMinuteInterval = Arrays.asList( + ZonedDateTime.of(2017, 2, 1, 9, 2, 35, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 9, 2, 59, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 9, 15, 37, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 9, 16, 4, 0, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 9, 16, 42, 0, ZoneOffset.UTC)); testSearchCase(DEFAULT_QUERY, datesForMinuteInterval, aggregation -> aggregation.setNumBuckets(4).field(DATE_FIELD), @@ -668,10 +674,10 @@ public void testIntervalMinute() throws IOException { testSearchAndReduceCase(DEFAULT_QUERY, datesForMinuteInterval, aggregation -> aggregation.setNumBuckets(15).field(DATE_FIELD), histogram -> { - final Map expectedDocCount = new HashMap<>(); - expectedDocCount.put(datesForMinuteInterval.get(0).withSecondOfMinute(0), 2); - expectedDocCount.put(datesForMinuteInterval.get(2).withSecondOfMinute(0), 1); - expectedDocCount.put(datesForMinuteInterval.get(3).withSecondOfMinute(0), 2); + final Map expectedDocCount = new HashMap<>(); + expectedDocCount.put(datesForMinuteInterval.get(0).withSecond(0), 2); + expectedDocCount.put(datesForMinuteInterval.get(2).withSecond(0), 1); + expectedDocCount.put(datesForMinuteInterval.get(3).withSecond(0), 2); final List buckets = histogram.getBuckets(); assertEquals(15, buckets.size()); buckets.forEach(bucket -> @@ -681,15 +687,15 @@ public void testIntervalMinute() throws IOException { } public void testIntervalSecond() throws IOException { - final List datesForSecondInterval = Arrays.asList( - new DateTime(2017, 2, 1, 0, 0, 5, 15, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 0, 0, 7, 299, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 0, 0, 7, 74, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 0, 0, 11, 688, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 0, 0, 11, 210, DateTimeZone.UTC), - new DateTime(2017, 2, 1, 0, 0, 11, 380, DateTimeZone.UTC)); - final DateTime startDate = datesForSecondInterval.get(0).withMillisOfSecond(0); - final Map expectedDocCount = new HashMap<>(); + final List datesForSecondInterval = Arrays.asList( + ZonedDateTime.of(2017, 2, 1, 0, 0, 5, 15, ZoneOffset.UTC), + 
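// Sketch (not part of the patch): a porting subtlety visible in the hunk above. Joda's
// eight-argument constructor new DateTime(y, M, d, H, m, s, millis, zone) takes
// MILLIseconds, while ZonedDateTime.of(y, M, d, H, m, s, nanos, zone) takes NANOseconds.
// Values such as 15 and 299 therefore change meaning when carried over verbatim; that is
// harmless for these tests, since the keys are truncated with withNano(0), but an
// instant-preserving port would scale the argument:
import java.time.ZoneOffset;
import java.time.ZonedDateTime;

class MillisToNanosSketch {
    static ZonedDateTime ofMillis(int y, int M, int d, int H, int m, int s, int millis) {
        // 1 millisecond == 1_000_000 nanoseconds
        return ZonedDateTime.of(y, M, d, H, m, s, millis * 1_000_000, ZoneOffset.UTC);
    }
}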
ZonedDateTime.of(2017, 2, 1, 0, 0, 7, 299, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 0, 0, 7, 74, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 0, 0, 11, 688, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 0, 0, 11, 210, ZoneOffset.UTC), + ZonedDateTime.of(2017, 2, 1, 0, 0, 11, 380, ZoneOffset.UTC)); + final ZonedDateTime startDate = datesForSecondInterval.get(0).withNano(0); + final Map expectedDocCount = new HashMap<>(); expectedDocCount.put(startDate, 1); expectedDocCount.put(startDate.plusSeconds(2), 2); expectedDocCount.put(startDate.plusSeconds(6), 3); @@ -712,19 +718,19 @@ public void testIntervalSecond() throws IOException { ); } - private void testSearchCase(final Query query, final List dataset, + private void testSearchCase(final Query query, final List dataset, final Consumer configure, final Consumer verify) throws IOException { executeTestCase(false, query, dataset, configure, verify); } - private void testSearchAndReduceCase(final Query query, final List dataset, + private void testSearchAndReduceCase(final Query query, final List dataset, final Consumer configure, final Consumer verify) throws IOException { executeTestCase(true, query, dataset, configure, verify); } - private void testBothCases(final Query query, final List dataset, + private void testBothCases(final Query query, final List dataset, final Consumer configure, final Consumer verify) throws IOException { executeTestCase(false, query, dataset, configure, verify); @@ -745,18 +751,18 @@ protected IndexSettings createIndexSettings() { ); } - private void executeTestCase(final boolean reduced, final Query query, final List dataset, + private void executeTestCase(final boolean reduced, final Query query, final List dataset, final Consumer configure, final Consumer verify) throws IOException { try (Directory directory = newDirectory()) { try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { final Document document = new Document(); - for (final DateTime date : dataset) { + for (final ZonedDateTime date : dataset) { if (frequently()) { indexWriter.commit(); } - final long instant = date.getMillis(); + final long instant = date.toInstant().toEpochMilli(); document.add(new SortedNumericDocValuesField(DATE_FIELD, instant)); document.add(new LongPoint(INSTANT_FIELD, instant)); indexWriter.addDocument(document); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java index c1b9396664a22..2fbf60a3ddccb 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java @@ -30,6 +30,7 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorTestCase; @@ -474,6 +475,6 @@ private void executeTestCase(boolean reduced, Query query, List dataset, } private static long asLong(String dateTime) { - return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda(dateTime).getMillis(); + return 
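// Sketch (not part of the patch): the asLong(...) pattern substituted throughout this
// series. parseJoda(...).getMillis() becomes parse(...) to a TemporalAccessor followed by
// an explicit conversion to an instant. Plain JDK stand-in only; the ES-internal
// DateFormatters.toZonedDateTime also fills in defaults for missing fields, which
// Instant.parse does not:
import java.time.Instant;

class AsLongSketch {
    static long asLong(String dateTime) {
        // Handles strict ISO-8601 instants such as "2017-01-01T00:00:00Z".
        return Instant.parse(dateTime).toEpochMilli();
    }
}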
DateFormatters.toZonedDateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli(); } } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramTests.java index 5148b0b85754f..c65b21ef72d32 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramTests.java @@ -31,9 +31,10 @@ import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; import org.elasticsearch.search.aggregations.BucketOrder; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.List; @@ -137,7 +138,7 @@ private static Document documentForDate(String field, long millis) { } public void testRewriteTimeZone() throws IOException { - DateFormatter format = DateFormatters.forPattern("strict_date_optional_time"); + DateFormatter format = DateFormatter.forPattern("strict_date_optional_time"); try (Directory dir = newDirectory(); IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) { @@ -166,15 +167,15 @@ public void testRewriteTimeZone() throws IOException { assertNull(builder.rewriteTimeZone(shardContextThatCrosses)); // fixed timeZone => no rewrite - DateTimeZone tz = DateTimeZone.forOffsetHours(1); + ZoneId tz = ZoneOffset.ofHours(1); builder.timeZone(tz); assertSame(tz, builder.rewriteTimeZone(shardContextThatDoesntCross)); assertSame(tz, builder.rewriteTimeZone(shardContextThatCrosses)); // daylight-saving-times => rewrite if doesn't cross - tz = DateTimeZone.forID("Europe/Paris"); + tz = ZoneId.of("Europe/Paris"); builder.timeZone(tz); - assertEquals(DateTimeZone.forOffsetHours(1), builder.rewriteTimeZone(shardContextThatDoesntCross)); + assertEquals(ZoneOffset.ofHours(1), builder.rewriteTimeZone(shardContextThatDoesntCross)); assertSame(tz, builder.rewriteTimeZone(shardContextThatCrosses)); // Rounded values are no longer all within the same transitions => no rewrite @@ -187,7 +188,7 @@ public void testRewriteTimeZone() throws IOException { builder.timeZone(tz); builder.interval(1000L * 60 * 60 * 24); // ~ 1 day - assertEquals(DateTimeZone.forOffsetHours(1), builder.rewriteTimeZone(shardContextThatDoesntCross)); + assertEquals(ZoneOffset.ofHours(1), builder.rewriteTimeZone(shardContextThatDoesntCross)); assertSame(tz, builder.rewriteTimeZone(shardContextThatCrosses)); // Because the interval is large, rounded values are not diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBoundsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBoundsTests.java index 486f78778c452..0980bb7cf97ec 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBoundsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBoundsTests.java @@ -36,13 +36,9 @@ import org.elasticsearch.search.SearchParseException; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.ESTestCase; -import org.joda.time.DateTimeZone; -import org.joda.time.Instant; -import org.joda.time.format.DateTimeFormatter; -import 
org.joda.time.format.ISODateTimeFormat; import java.io.IOException; -import java.util.Locale; +import java.time.ZoneOffset; import static java.lang.Math.max; import static java.lang.Math.min; @@ -66,17 +62,19 @@ public static ExtendedBounds randomExtendedBounds() { * Construct a random {@link ExtendedBounds} in pre-parsed form. */ public static ExtendedBounds randomParsedExtendedBounds() { + long maxDateValue = 253402300799999L; // end of year 9999 + long minDateValue = -377705116800000L; // beginning of year -9999 if (randomBoolean()) { // Construct with one missing bound if (randomBoolean()) { - return new ExtendedBounds(null, randomLong()); + return new ExtendedBounds(null, maxDateValue); } - return new ExtendedBounds(randomLong(), null); + return new ExtendedBounds(minDateValue, null); } - long a = randomLong(); + long a = randomLongBetween(minDateValue, maxDateValue); long b; do { - b = randomLong(); + b = randomLongBetween(minDateValue, maxDateValue); } while (a == b); long min = min(a, b); long max = max(a, b); @@ -88,9 +86,9 @@ public static ExtendedBounds randomParsedExtendedBounds() { */ public static ExtendedBounds unparsed(ExtendedBounds template) { // It'd probably be better to randomize the formatter - DateTimeFormatter formatter = ISODateTimeFormat.dateTime().withLocale(Locale.ROOT).withZone(DateTimeZone.UTC); - String minAsStr = template.getMin() == null ? null : formatter.print(new Instant(template.getMin())); - String maxAsStr = template.getMax() == null ? null : formatter.print(new Instant(template.getMax())); + DateFormatter formatter = DateFormatter.forPattern("strict_date_time").withZone(ZoneOffset.UTC); + String minAsStr = template.getMin() == null ? null : formatter.formatMillis(template.getMin()); + String maxAsStr = template.getMax() == null ? 
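// Sketch (not part of the patch): why the random bounds above are clamped. Joda would
// print any long, but the java.time-based strict printers cannot represent years outside
// roughly -9999..9999, so the test now draws values from that range. The two constants
// can be rederived with JDK types:
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.time.ZoneOffset;

class FormattableEpochRangeSketch {
    // beginning of year -9999, in epoch milliseconds (-377705116800000)
    static final long MIN_DATE_VALUE =
        LocalDate.of(-9999, 1, 1).atStartOfDay(ZoneOffset.UTC).toInstant().toEpochMilli();
    // last millisecond of year 9999 (253402300799999)
    static final long MAX_DATE_VALUE =
        LocalDateTime.of(LocalDate.of(9999, 12, 31), LocalTime.MAX)
            .toInstant(ZoneOffset.UTC).toEpochMilli();
}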
null : formatter.formatMillis(template.getMax()); return new ExtendedBounds(minAsStr, maxAsStr); } @@ -104,7 +102,7 @@ public void testParseAndValidate() { null, xContentRegistry(), writableRegistry(), null, null, () -> now, null); when(context.getQueryShardContext()).thenReturn(qsc); DateFormatter formatter = DateFormatter.forPattern("dateOptionalTime"); - DocValueFormat format = new DocValueFormat.DateTime(formatter, DateTimeZone.UTC); + DocValueFormat format = new DocValueFormat.DateTime(formatter, ZoneOffset.UTC); ExtendedBounds expected = randomParsedExtendedBounds(); ExtendedBounds parsed = unparsed(expected).parseAndValidate("test", context, format); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java index dd3425c20f43c..fe5c967f54be8 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java @@ -19,8 +19,8 @@ package org.elasticsearch.search.aggregations.bucket.histogram; +import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.rounding.DateTimeUnit; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation; @@ -28,12 +28,12 @@ import org.elasticsearch.search.aggregations.bucket.histogram.InternalAutoDateHistogram.BucketInfo; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalMultiBucketAggregationTestCase; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; import java.time.Instant; import java.time.OffsetDateTime; +import java.time.ZoneId; import java.time.ZoneOffset; +import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -89,16 +89,16 @@ protected InternalAutoDateHistogram createTestInstance(String name, */ public void testGetAppropriateRoundingUsesCorrectIntervals() { RoundingInfo[] roundings = new RoundingInfo[6]; - DateTimeZone timeZone = DateTimeZone.UTC; + ZoneId timeZone = ZoneOffset.UTC; // Since we pass 0 as the starting index to getAppropriateRounding, we'll also use // an innerInterval that is quite large, such that targetBuckets * roundings[i].getMaximumInnerInterval() // will be larger than the estimate. 
- roundings[0] = new RoundingInfo(createRounding(DateTimeUnit.SECOND_OF_MINUTE, timeZone), - 1000L, "s", 1000); - roundings[1] = new RoundingInfo(createRounding(DateTimeUnit.MINUTES_OF_HOUR, timeZone), - 60 * 1000L, "m", 1, 5, 10, 30); - roundings[2] = new RoundingInfo(createRounding(DateTimeUnit.HOUR_OF_DAY, timeZone), - 60 * 60 * 1000L, "h", 1, 3, 12); + roundings[0] = new RoundingInfo(createRounding(Rounding.DateTimeUnit.SECOND_OF_MINUTE, timeZone), + 1000L, "s",1000); + roundings[1] = new RoundingInfo(createRounding(Rounding.DateTimeUnit.MINUTES_OF_HOUR, timeZone), + 60 * 1000L, "m",1, 5, 10, 30); + roundings[2] = new RoundingInfo(createRounding(Rounding.DateTimeUnit.HOUR_OF_DAY, timeZone), + 60 * 60 * 1000L, "h",1, 3, 12); OffsetDateTime timestamp = Instant.parse("2018-01-01T00:00:01.000Z").atOffset(ZoneOffset.UTC); // We want to pass a roundingIdx of zero, because in order to reproduce this bug, we need the function @@ -117,7 +117,7 @@ protected void assertReduced(InternalAutoDateHistogram reduced, List= keyForBucket && roundedBucketKey < keyForBucket + intervalInMillis) { @@ -194,7 +194,7 @@ protected void assertReduced(InternalAutoDateHistogram reduced, List actualCounts = new TreeMap<>(); for (Histogram.Bucket bucket : reduced.getBuckets()) { - actualCounts.compute(((DateTime) bucket.getKey()).getMillis(), + actualCounts.compute(((ZonedDateTime) bucket.getKey()).toInstant().toEpochMilli(), (key, oldValue) -> (oldValue == null ? 0 : oldValue) + bucket.getDocCount()); } assertEquals(expectedCounts, actualCounts); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogramTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogramTests.java index dd5d06f8785f7..961a05a7c40fd 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogramTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogramTests.java @@ -19,8 +19,8 @@ package org.elasticsearch.search.aggregations.bucket.histogram; +import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.rounding.Rounding; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.BucketOrder; @@ -28,8 +28,8 @@ import org.elasticsearch.search.aggregations.ParsedMultiBucketAggregation; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.test.InternalMultiBucketAggregationTestCase; -import org.joda.time.DateTime; +import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -104,7 +104,7 @@ protected void assertReduced(InternalDateHistogram reduced, List expectedCounts = new TreeMap<>(); for (Histogram histogram : inputs) { for (Histogram.Bucket bucket : histogram.getBuckets()) { - expectedCounts.compute(((DateTime) bucket.getKey()).getMillis(), + expectedCounts.compute(((ZonedDateTime) bucket.getKey()).toInstant().toEpochMilli(), (key, oldValue) -> (oldValue == null ? 
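// Sketch (not part of the patch): the reduction-count idiom this hunk ports. Bucket keys
// are now ZonedDateTime rather than Joda DateTime, so the map key becomes the underlying
// epoch-milli instant:
import java.time.ZonedDateTime;
import java.util.Map;

class BucketCountSketch {
    static void count(Map<Long, Long> counts, ZonedDateTime bucketKey, long docCount) {
        // Merge doc counts per instant; a TreeMap keeps the keys in chronological order.
        counts.compute(bucketKey.toInstant().toEpochMilli(),
            (key, oldValue) -> (oldValue == null ? 0 : oldValue) + docCount);
    }
}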
0 : oldValue) + bucket.getDocCount()); } } @@ -139,7 +139,7 @@ protected void assertReduced(InternalDateHistogram reduced, List actualCounts = new TreeMap<>(); for (Histogram.Bucket bucket : reduced.getBuckets()) { - actualCounts.compute(((DateTime) bucket.getKey()).getMillis(), + actualCounts.compute(((ZonedDateTime) bucket.getKey()).toInstant().toEpochMilli(), (key, oldValue) -> (oldValue == null ? 0 : oldValue) + bucket.getDocCount()); } assertEquals(expectedCounts, actualCounts); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregatorTests.java index d0027208b104b..78c4d18218eea 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregatorTests.java @@ -41,9 +41,9 @@ import org.elasticsearch.search.aggregations.metrics.WeightedAvgAggregator; import org.elasticsearch.search.aggregations.support.AggregationInspectionHelper; import org.elasticsearch.search.aggregations.support.MultiValuesSourceFieldConfig; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneOffset; import java.util.Arrays; import java.util.Collections; import java.util.function.Consumer; @@ -260,7 +260,7 @@ public void testWeightSetTimezone() throws IOException { MultiValuesSourceFieldConfig valueConfig = new MultiValuesSourceFieldConfig.Builder().setFieldName("value_field").build(); MultiValuesSourceFieldConfig weightConfig = new MultiValuesSourceFieldConfig.Builder() .setFieldName("weight_field") - .setTimeZone(DateTimeZone.UTC) + .setTimeZone(ZoneOffset.UTC) .build(); WeightedAvgAggregationBuilder aggregationBuilder = new WeightedAvgAggregationBuilder("_name") .value(valueConfig) @@ -283,7 +283,7 @@ public void testWeightSetTimezone() throws IOException { public void testValueSetTimezone() throws IOException { MultiValuesSourceFieldConfig valueConfig = new MultiValuesSourceFieldConfig.Builder() .setFieldName("value_field") - .setTimeZone(DateTimeZone.UTC) + .setTimeZone(ZoneOffset.UTC) .build(); MultiValuesSourceFieldConfig weightConfig = new MultiValuesSourceFieldConfig.Builder().setFieldName("weight_field").build(); WeightedAvgAggregationBuilder aggregationBuilder = new WeightedAvgAggregationBuilder("_name") diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketAggregatorTests.java index 59af941812175..3ed1a15603e84 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketAggregatorTests.java @@ -28,6 +28,7 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; @@ -40,9 +41,6 @@ import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogram; import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.InternalAvg; -import 
org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
-import org.elasticsearch.search.aggregations.pipeline.AvgBucketPipelineAggregationBuilder;
-import org.elasticsearch.search.aggregations.pipeline.AvgBucketPipelineAggregator;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -141,8 +139,7 @@ public void testSameAggNames() throws IOException {
         }
     }
 
     private static long asLong(String dateTime) {
-        return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda(dateTime).getMillis();
+        return DateFormatters.toZonedDateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli();
     }
 }
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketSortIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketSortIT.java
index 7cb4371354c3b..22a4fdbdf67bf 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketSortIT.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/BucketSortIT.java
@@ -31,9 +31,9 @@
 import org.elasticsearch.search.sort.FieldSortBuilder;
 import org.elasticsearch.search.sort.SortOrder;
 import org.elasticsearch.test.ESIntegTestCase;
-import org.joda.time.DateTime;
 
 import java.io.IOException;
+import java.time.ZonedDateTime;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -132,10 +132,10 @@ public void testEmptyBucketSort() {
         assertThat(histogram, notNullValue());
         // These become our baseline
         List<Histogram.Bucket> timeBuckets = histogram.getBuckets();
-        DateTime previousKey = (DateTime) timeBuckets.get(0).getKey();
+        ZonedDateTime previousKey = (ZonedDateTime) timeBuckets.get(0).getKey();
         for (Histogram.Bucket timeBucket : timeBuckets) {
-            assertThat(previousKey, lessThanOrEqualTo((DateTime) timeBucket.getKey()));
-            previousKey = (DateTime) timeBucket.getKey();
+            assertThat(previousKey, lessThanOrEqualTo((ZonedDateTime) timeBucket.getKey()));
+            previousKey = (ZonedDateTime) timeBucket.getKey();
         }
 
         // Now let's test using size
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java
index b1eec2b0f48ca..4b23304e642c0 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumAggregatorTests.java
@@ -31,6 +31,7 @@
 import org.apache.lucene.search.Query;
 import org.apache.lucene.store.Directory;
 import org.elasticsearch.common.CheckedConsumer;
+import org.elasticsearch.common.time.DateFormatters;
 import org.elasticsearch.index.mapper.DateFieldMapper;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.NumberFieldMapper;
@@ -365,6 +366,6 @@ private void executeTestCase(Query query, AggregationBuilder aggBuilder, Consume
     }
 
     private static long asLong(String dateTime) {
-        return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda(dateTime).getMillis();
+        return DateFormatters.toZonedDateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli();
     }
 }
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DateDerivativeIT.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DateDerivativeIT.java
index 62fe7a2a45a60..db1ee6ab18916 100644
---
a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DateDerivativeIT.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/DateDerivativeIT.java @@ -21,7 +21,8 @@ import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; @@ -31,12 +32,14 @@ import org.elasticsearch.search.aggregations.support.AggregationPath; import org.elasticsearch.test.ESIntegTestCase; import org.hamcrest.Matcher; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; -import org.joda.time.format.DateTimeFormat; import org.junit.After; import java.io.IOException; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -60,19 +63,11 @@ public class DateDerivativeIT extends ESIntegTestCase { private static final String IDX_DST_END = "idx_dst_end"; private static final String IDX_DST_KATHMANDU = "idx_dst_kathmandu"; - private DateTime date(int month, int day) { - return new DateTime(2012, month, day, 0, 0, DateTimeZone.UTC); + private ZonedDateTime date(int month, int day) { + return ZonedDateTime.of(2012, month, day, 0, 0, 0, 0, ZoneOffset.UTC); } - private DateTime date(String date) { - return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda(date); - } - - private static String format(DateTime date, String pattern) { - return DateTimeFormat.forPattern(pattern).print(date); - } - - private static IndexRequestBuilder indexDoc(String idx, DateTime date, int value) throws Exception { + private static IndexRequestBuilder indexDoc(String idx, ZonedDateTime date, int value) throws Exception { return client().prepareIndex(idx, "type").setSource( jsonBuilder().startObject().timeField("date", date).field("value", value).endObject()); } @@ -124,27 +119,27 @@ public void testSingleValuedField() throws Exception { List buckets = deriv.getBuckets(); assertThat(buckets.size(), equalTo(3)); - DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat((ZonedDateTime) bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, nullValue()); - key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat((ZonedDateTime) bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(2L)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, notNullValue()); assertThat(docCountDeriv.value(), equalTo(1d)); - key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, 
ZoneOffset.UTC); bucket = buckets.get(2); assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat((ZonedDateTime) bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, notNullValue()); @@ -166,28 +161,28 @@ public void testSingleValuedFieldNormalised() throws Exception { List buckets = deriv.getBuckets(); assertThat(buckets.size(), equalTo(3)); - DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); Derivative docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, nullValue()); - key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(2L)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, notNullValue()); assertThat(docCountDeriv.value(), closeTo(1d, 0.00001)); assertThat(docCountDeriv.normalizedValue(), closeTo(1d / 31d, 0.00001)); - key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(2); assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, notNullValue()); @@ -202,11 +197,14 @@ public void testSingleValuedFieldNormalised_timeZone_CET_DstStart() throws Excep createIndex(IDX_DST_START); List builders = new ArrayList<>(); - DateTimeZone timezone = DateTimeZone.forID("CET"); - addNTimes(1, IDX_DST_START, new DateTime("2012-03-24T01:00:00", timezone), builders); - addNTimes(2, IDX_DST_START, new DateTime("2012-03-25T01:00:00", timezone), builders); // day with dst shift, only 23h long - addNTimes(3, IDX_DST_START, new DateTime("2012-03-26T01:00:00", timezone), builders); - addNTimes(4, IDX_DST_START, new DateTime("2012-03-27T01:00:00", timezone), builders); + ZoneId timezone = ZoneId.of("CET"); + DateFormatter formatter = DateFormatter.forPattern("yyyy-MM-dd'T'HH:mm:ss").withZone(timezone); + // epoch millis: 1332547200000 + addNTimes(1, IDX_DST_START, DateFormatters.toZonedDateTime(formatter.parse("2012-03-24T01:00:00")), builders); + // day with dst shift, only 23h long + addNTimes(2, IDX_DST_START, DateFormatters.toZonedDateTime(formatter.parse("2012-03-25T01:00:00")), builders); + addNTimes(3, IDX_DST_START, DateFormatters.toZonedDateTime(formatter.parse("2012-03-26T01:00:00")), builders); + addNTimes(4, IDX_DST_START, DateFormatters.toZonedDateTime(formatter.parse("2012-03-27T01:00:00")), builders); indexRandom(true, builders); ensureSearchable(); @@ -225,11 +223,23 @@ public void testSingleValuedFieldNormalised_timeZone_CET_DstStart() throws Excep List buckets = deriv.getBuckets(); assertThat(buckets.size(), equalTo(4)); - assertBucket(buckets.get(0), new DateTime("2012-03-24", 
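// Sketch (not part of the patch): the replacement pattern for Joda's zone-aware string
// constructor new DateTime("2012-03-24", timezone) used in the assertions below.
// java.time parses the LocalDate first, takes the start of day in the zone, then
// normalizes to UTC; the same API also exposes the 23h day the normalization expects:
import java.time.Duration;
import java.time.LocalDate;
import java.time.ZoneId;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;

class DstDayLengthSketch {
    static ZonedDateTime utcBucketKey(String isoDate, ZoneId zone) {
        return LocalDate.parse(isoDate).atStartOfDay(zone).withZoneSameInstant(ZoneOffset.UTC);
    }

    static Duration dayLength(String isoDate, ZoneId zone) {
        LocalDate day = LocalDate.parse(isoDate);
        // For "2012-03-25" in ZoneId.of("CET") this yields PT23H: the spring-forward day.
        return Duration.between(day.atStartOfDay(zone), day.plusDays(1).atStartOfDay(zone));
    }
}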
timezone).toDateTime(DateTimeZone.UTC), 1L, nullValue(), null, null); - assertBucket(buckets.get(1), new DateTime("2012-03-25", timezone).toDateTime(DateTimeZone.UTC), 2L, notNullValue(), 1d, 1d / 24d); + DateFormatter dateFormatter = DateFormatter.forPattern("yyyy-MM-dd"); + ZonedDateTime expectedKeyFirstBucket = + LocalDate.from(dateFormatter.parse("2012-03-24")).atStartOfDay(timezone).withZoneSameInstant(ZoneOffset.UTC); + assertBucket(buckets.get(0), expectedKeyFirstBucket, 1L, nullValue(), null, null); + + ZonedDateTime expectedKeySecondBucket = + LocalDate.from(dateFormatter.parse("2012-03-25")).atStartOfDay(timezone).withZoneSameInstant(ZoneOffset.UTC); + assertBucket(buckets.get(1), expectedKeySecondBucket,2L, notNullValue(), 1d, 1d / 24d); + // the following is normalized using a 23h bucket width - assertBucket(buckets.get(2), new DateTime("2012-03-26", timezone).toDateTime(DateTimeZone.UTC), 3L, notNullValue(), 1d, 1d / 23d); - assertBucket(buckets.get(3), new DateTime("2012-03-27", timezone).toDateTime(DateTimeZone.UTC), 4L, notNullValue(), 1d, 1d / 24d); + ZonedDateTime expectedKeyThirdBucket = + LocalDate.from(dateFormatter.parse("2012-03-26")).atStartOfDay(timezone).withZoneSameInstant(ZoneOffset.UTC); + assertBucket(buckets.get(2), expectedKeyThirdBucket, 3L, notNullValue(), 1d, 1d / 23d); + + ZonedDateTime expectedKeyFourthBucket = + LocalDate.from(dateFormatter.parse("2012-03-27")).atStartOfDay(timezone).withZoneSameInstant(ZoneOffset.UTC); + assertBucket(buckets.get(3), expectedKeyFourthBucket, 4L, notNullValue(), 1d, 1d / 24d); } /** @@ -237,13 +247,15 @@ public void testSingleValuedFieldNormalised_timeZone_CET_DstStart() throws Excep */ public void testSingleValuedFieldNormalised_timeZone_CET_DstEnd() throws Exception { createIndex(IDX_DST_END); - DateTimeZone timezone = DateTimeZone.forID("CET"); + ZoneId timezone = ZoneId.of("CET"); List builders = new ArrayList<>(); - addNTimes(1, IDX_DST_END, new DateTime("2012-10-27T01:00:00", timezone), builders); - addNTimes(2, IDX_DST_END, new DateTime("2012-10-28T01:00:00", timezone), builders); // day with dst shift -1h, 25h long - addNTimes(3, IDX_DST_END, new DateTime("2012-10-29T01:00:00", timezone), builders); - addNTimes(4, IDX_DST_END, new DateTime("2012-10-30T01:00:00", timezone), builders); + DateFormatter formatter = DateFormatter.forPattern("yyyy-MM-dd'T'HH:mm:ss").withZone(timezone); + addNTimes(1, IDX_DST_END, DateFormatters.toZonedDateTime(formatter.parse("2012-10-27T01:00:00")), builders); + // day with dst shift -1h, 25h long + addNTimes(2, IDX_DST_END, DateFormatters.toZonedDateTime(formatter.parse("2012-10-28T01:00:00")), builders); + addNTimes(3, IDX_DST_END, DateFormatters.toZonedDateTime(formatter.parse("2012-10-29T01:00:00")), builders); + addNTimes(4, IDX_DST_END, DateFormatters.toZonedDateTime(formatter.parse("2012-10-30T01:00:00")), builders); indexRandom(true, builders); ensureSearchable(); @@ -262,11 +274,24 @@ public void testSingleValuedFieldNormalised_timeZone_CET_DstEnd() throws Excepti List buckets = deriv.getBuckets(); assertThat(buckets.size(), equalTo(4)); - assertBucket(buckets.get(0), new DateTime("2012-10-27", timezone).toDateTime(DateTimeZone.UTC), 1L, nullValue(), null, null); - assertBucket(buckets.get(1), new DateTime("2012-10-28", timezone).toDateTime(DateTimeZone.UTC), 2L, notNullValue(), 1d, 1d / 24d); + DateFormatter dateFormatter = DateFormatter.forPattern("yyyy-MM-dd").withZone(ZoneOffset.UTC); + + ZonedDateTime expectedKeyFirstBucket = + 
LocalDate.from(dateFormatter.parse("2012-10-27")).atStartOfDay(timezone).withZoneSameInstant(ZoneOffset.UTC); + assertBucket(buckets.get(0), expectedKeyFirstBucket, 1L, nullValue(), null, null); + + ZonedDateTime expectedKeySecondBucket = + LocalDate.from(dateFormatter.parse("2012-10-28")).atStartOfDay(timezone).withZoneSameInstant(ZoneOffset.UTC); + assertBucket(buckets.get(1), expectedKeySecondBucket, 2L, notNullValue(), 1d, 1d / 24d); + // the following is normalized using a 25h bucket width - assertBucket(buckets.get(2), new DateTime("2012-10-29", timezone).toDateTime(DateTimeZone.UTC), 3L, notNullValue(), 1d, 1d / 25d); - assertBucket(buckets.get(3), new DateTime("2012-10-30", timezone).toDateTime(DateTimeZone.UTC), 4L, notNullValue(), 1d, 1d / 24d); + ZonedDateTime expectedKeyThirdBucket = + LocalDate.from(dateFormatter.parse("2012-10-29")).atStartOfDay(timezone).withZoneSameInstant(ZoneOffset.UTC); + assertBucket(buckets.get(2), expectedKeyThirdBucket, 3L, notNullValue(), 1d, 1d / 25d); + + ZonedDateTime expectedKeyFourthBucket = + LocalDate.from(dateFormatter.parse("2012-10-30")).atStartOfDay(timezone).withZoneSameInstant(ZoneOffset.UTC); + assertBucket(buckets.get(3), expectedKeyFourthBucket, 4L, notNullValue(), 1d, 1d / 24d); } /** @@ -275,14 +300,15 @@ public void testSingleValuedFieldNormalised_timeZone_CET_DstEnd() throws Excepti */ public void testSingleValuedFieldNormalised_timeZone_AsiaKathmandu() throws Exception { createIndex(IDX_DST_KATHMANDU); - DateTimeZone timezone = DateTimeZone.forID("Asia/Kathmandu"); + ZoneId timezone = ZoneId.of("Asia/Kathmandu"); List builders = new ArrayList<>(); - addNTimes(1, IDX_DST_KATHMANDU, new DateTime("1985-12-31T22:30:00", timezone), builders); + DateFormatter formatter = DateFormatter.forPattern("yyyy-MM-dd'T'HH:mm:ss").withZone(timezone); + addNTimes(1, IDX_DST_KATHMANDU, DateFormatters.toZonedDateTime(formatter.parse("1985-12-31T22:30:00")), builders); // the shift happens during the next bucket, which includes the 45min that do not start on the full hour - addNTimes(2, IDX_DST_KATHMANDU, new DateTime("1985-12-31T23:30:00", timezone), builders); - addNTimes(3, IDX_DST_KATHMANDU, new DateTime("1986-01-01T01:30:00", timezone), builders); - addNTimes(4, IDX_DST_KATHMANDU, new DateTime("1986-01-01T02:30:00", timezone), builders); + addNTimes(2, IDX_DST_KATHMANDU, DateFormatters.toZonedDateTime(formatter.parse("1985-12-31T23:30:00")), builders); + addNTimes(3, IDX_DST_KATHMANDU, DateFormatters.toZonedDateTime(formatter.parse("1986-01-01T01:30:00")), builders); + addNTimes(4, IDX_DST_KATHMANDU, DateFormatters.toZonedDateTime(formatter.parse("1986-01-01T02:30:00")), builders); indexRandom(true, builders); ensureSearchable(); @@ -301,27 +327,36 @@ public void testSingleValuedFieldNormalised_timeZone_AsiaKathmandu() throws Exce List buckets = deriv.getBuckets(); assertThat(buckets.size(), equalTo(4)); - assertBucket(buckets.get(0), new DateTime("1985-12-31T22:00:00", timezone).toDateTime(DateTimeZone.UTC), 1L, nullValue(), null, - null); - assertBucket(buckets.get(1), new DateTime("1985-12-31T23:00:00", timezone).toDateTime(DateTimeZone.UTC), 2L, notNullValue(), 1d, - 1d / 60d); + DateFormatter dateFormatter = DateFormatter.forPattern("yyyy-MM-dd'T'HH:mm:ss").withZone(ZoneOffset.UTC); + + ZonedDateTime expectedKeyFirstBucket = + LocalDateTime.from(dateFormatter.parse("1985-12-31T22:00:00")).atZone(timezone).withZoneSameInstant(ZoneOffset.UTC); + assertBucket(buckets.get(0), expectedKeyFirstBucket, 1L, nullValue(), null,null); + + 
ZonedDateTime expectedKeySecondBucket = + LocalDateTime.from(dateFormatter.parse("1985-12-31T23:00:00")).atZone(timezone).withZoneSameInstant(ZoneOffset.UTC); + assertBucket(buckets.get(1), expectedKeySecondBucket, 2L, notNullValue(), 1d,1d / 60d); + // the following is normalized using a 105min bucket width - assertBucket(buckets.get(2), new DateTime("1986-01-01T01:00:00", timezone).toDateTime(DateTimeZone.UTC), 3L, notNullValue(), 1d, - 1d / 105d); - assertBucket(buckets.get(3), new DateTime("1986-01-01T02:00:00", timezone).toDateTime(DateTimeZone.UTC), 4L, notNullValue(), 1d, - 1d / 60d); + ZonedDateTime expectedKeyThirdBucket = + LocalDateTime.from(dateFormatter.parse("1986-01-01T01:00:00")).atZone(timezone).withZoneSameInstant(ZoneOffset.UTC); + assertBucket(buckets.get(2), expectedKeyThirdBucket, 3L, notNullValue(), 1d,1d / 105d); + + ZonedDateTime expectedKeyFourthBucket = + LocalDateTime.from(dateFormatter.parse("1986-01-01T02:00:00")).atZone(timezone).withZoneSameInstant(ZoneOffset.UTC); + assertBucket(buckets.get(3), expectedKeyFourthBucket, 4L, notNullValue(), 1d,1d / 60d); } - private static void addNTimes(int amount, String index, DateTime dateTime, List builders) throws Exception { + private static void addNTimes(int amount, String index, ZonedDateTime dateTime, List builders) throws Exception { for (int i = 0; i < amount; i++) { builders.add(indexDoc(index, dateTime, 1)); } } - private static void assertBucket(Histogram.Bucket bucket, DateTime expectedKey, long expectedDocCount, + private static void assertBucket(Histogram.Bucket bucket, ZonedDateTime expectedKey, long expectedDocCount, Matcher derivativeMatcher, Double derivative, Double normalizedDerivative) { assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(expectedKey)); + assertThat((ZonedDateTime) bucket.getKey(), equalTo(expectedKey)); assertThat(bucket.getDocCount(), equalTo(expectedDocCount)); Derivative docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, derivativeMatcher); @@ -350,10 +385,10 @@ public void testSingleValuedFieldWithSubAggregation() throws Exception { Object[] propertiesDocCounts = (Object[]) ((InternalAggregation)histo).getProperty("_count"); Object[] propertiesCounts = (Object[]) ((InternalAggregation)histo).getProperty("sum.value"); - DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat((ZonedDateTime) bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); Sum sum = bucket.getAggregations().get("sum"); @@ -361,14 +396,14 @@ public void testSingleValuedFieldWithSubAggregation() throws Exception { assertThat(sum.getValue(), equalTo(1.0)); SimpleValue deriv = bucket.getAggregations().get("deriv"); assertThat(deriv, nullValue()); - assertThat((DateTime) propertiesKeys[0], equalTo(key)); + assertThat((ZonedDateTime) propertiesKeys[0], equalTo(key)); assertThat((long) propertiesDocCounts[0], equalTo(1L)); assertThat((double) propertiesCounts[0], equalTo(1.0)); - key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); + 
assertThat((ZonedDateTime) bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(2L)); assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); sum = bucket.getAggregations().get("sum"); @@ -379,14 +414,14 @@ public void testSingleValuedFieldWithSubAggregation() throws Exception { assertThat(deriv.value(), equalTo(4.0)); assertThat(((InternalMultiBucketAggregation.InternalBucket)bucket).getProperty( "histo", AggregationPath.parse("deriv.value").getPathElementsAsStringList()), equalTo(4.0)); - assertThat((DateTime) propertiesKeys[1], equalTo(key)); + assertThat((ZonedDateTime) propertiesKeys[1], equalTo(key)); assertThat((long) propertiesDocCounts[1], equalTo(2L)); assertThat((double) propertiesCounts[1], equalTo(5.0)); - key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(2); assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat((ZonedDateTime) bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); sum = bucket.getAggregations().get("sum"); @@ -397,7 +432,7 @@ public void testSingleValuedFieldWithSubAggregation() throws Exception { assertThat(deriv.value(), equalTo(10.0)); assertThat(((InternalMultiBucketAggregation.InternalBucket)bucket).getProperty( "histo", AggregationPath.parse("deriv.value").getPathElementsAsStringList()), equalTo(10.0)); - assertThat((DateTime) propertiesKeys[2], equalTo(key)); + assertThat((ZonedDateTime) propertiesKeys[2], equalTo(key)); assertThat((long) propertiesDocCounts[2], equalTo(3L)); assertThat((double) propertiesCounts[2], equalTo(15.0)); } @@ -417,39 +452,39 @@ public void testMultiValuedField() throws Exception { List buckets = deriv.getBuckets(); assertThat(buckets.size(), equalTo(4)); - DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat((ZonedDateTime) bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); assertThat(bucket.getAggregations().asList().isEmpty(), is(true)); SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, nullValue()); - key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat((ZonedDateTime) bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, notNullValue()); assertThat(docCountDeriv.value(), equalTo(2.0)); - key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0,ZoneOffset.UTC); bucket = buckets.get(2); assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat((ZonedDateTime) bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(5L)); assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, 
notNullValue()); assertThat(docCountDeriv.value(), equalTo(2.0)); - key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 4, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(3); assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat((ZonedDateTime) bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); docCountDeriv = bucket.getAggregations().get("deriv"); @@ -487,29 +522,29 @@ public void testPartiallyUnmapped() throws Exception { List buckets = deriv.getBuckets(); assertThat(buckets.size(), equalTo(3)); - DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); + ZonedDateTime key = ZonedDateTime.of(2012, 1, 1, 0, 0, 0, 0, ZoneOffset.UTC); Histogram.Bucket bucket = buckets.get(0); assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat((ZonedDateTime) bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(1L)); assertThat(bucket.getAggregations().asList().isEmpty(), is(true)); SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, nullValue()); - key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 2, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(1); assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat((ZonedDateTime) bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(2L)); assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); docCountDeriv = bucket.getAggregations().get("deriv"); assertThat(docCountDeriv, notNullValue()); assertThat(docCountDeriv.value(), equalTo(1.0)); - key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + key = ZonedDateTime.of(2012, 3, 1, 0, 0, 0, 0, ZoneOffset.UTC); bucket = buckets.get(2); assertThat(bucket, notNullValue()); - assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat((ZonedDateTime) bucket.getKey(), equalTo(key)); assertThat(bucket.getDocCount(), equalTo(3L)); assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); docCountDeriv = bucket.getAggregations().get("deriv"); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovFnUnitTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovFnUnitTests.java index 341364edbcf32..b4ae26d5f13df 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovFnUnitTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/MovFnUnitTests.java @@ -30,6 +30,7 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; +import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.NumberFieldMapper; @@ -160,7 +161,7 @@ public double execute(Map params, double[] values) { } private static long asLong(String dateTime) { - return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda(dateTime).getMillis(); + return DateFormatters.toZonedDateTime(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(dateTime)).toInstant().toEpochMilli(); } /** diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregationHelperTests.java 
b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregationHelperTests.java
index a5a3007818f7f..fd8c0d58caf9c 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregationHelperTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/PipelineAggregationHelperTests.java
@@ -20,7 +20,7 @@ package org.elasticsearch.search.aggregations.pipeline;
 
-import org.elasticsearch.common.rounding.Rounding;
+import org.elasticsearch.common.Rounding;
 import org.elasticsearch.search.aggregations.AggregatorFactories;
 import org.elasticsearch.search.aggregations.AggregatorFactory;
 import org.elasticsearch.search.aggregations.InternalOrder;
diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceFieldConfigTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceFieldConfigTests.java
index 5007784a3d9a9..b929f222d94fe 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceFieldConfigTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceFieldConfigTests.java
@@ -23,9 +23,9 @@
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.script.Script;
 import org.elasticsearch.test.AbstractSerializingTestCase;
-import org.joda.time.DateTimeZone;
 
 import java.io.IOException;
+import java.time.ZoneId;
 
 import static org.hamcrest.Matchers.equalTo;
@@ -40,7 +40,7 @@ protected MultiValuesSourceFieldConfig doParseInstance(XContentParser parser) th
 
     protected MultiValuesSourceFieldConfig createTestInstance() {
         String field = randomAlphaOfLength(10);
         Object missing = randomBoolean() ? randomAlphaOfLength(10) : null;
-        DateTimeZone timeZone = randomBoolean() ? randomDateTimeZone() : null;
+        ZoneId timeZone = randomBoolean() ? 
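// Sketch (not part of the patch): randomDateTimeZone() produced a Joda DateTimeZone,
// while the randomZone() used above yields a java.time ZoneId; both serialize by their
// string id, which is the round-trip the config test exercises:
import java.time.ZoneId;

class ZoneIdRoundTripSketch {
    static ZoneId roundTrip(ZoneId zone) {
        return ZoneId.of(zone.getId());
    }
}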
randomZone() : null; return new MultiValuesSourceFieldConfig.Builder() .setFieldName(field).setMissing(missing).setScript(null).setTimeZone(timeZone).build(); } diff --git a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java index 03e2a8e8248b9..e111abe0d5132 100644 --- a/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java +++ b/server/src/test/java/org/elasticsearch/search/fetch/subphase/highlight/HighlighterSearchIT.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.fetch.subphase.highlight; import com.carrotsearch.randomizedtesting.generators.RandomPicks; - import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.MockAnalyzer; import org.apache.lucene.analysis.MockTokenizer; @@ -35,6 +34,7 @@ import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings.Builder; +import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; @@ -2815,9 +2815,11 @@ public void testHighlightQueryRewriteDatesWithNow() throws Exception { .setSettings(Settings.builder().put("index.number_of_replicas", 0).put("index.number_of_shards", 2)) .get()); ZonedDateTime now = ZonedDateTime.now(ZoneOffset.UTC); - indexRandom(true, client().prepareIndex("index-1", "type", "1").setSource("d", now, "field", "hello world"), - client().prepareIndex("index-1", "type", "2").setSource("d", now.minusDays(1), "field", "hello"), - client().prepareIndex("index-1", "type", "3").setSource("d", now.minusDays(2), "field", "world")); + DateFormatter formatter = DateFormatter.forPattern("strict_date_optional_time"); + indexRandom(true, + client().prepareIndex("index-1", "type", "1").setSource("d", formatter.format(now), "field", "hello world"), + client().prepareIndex("index-1", "type", "2").setSource("d", formatter.format(now.minusDays(1)), "field", "hello"), + client().prepareIndex("index-1", "type", "3").setSource("d", formatter.format(now.minusDays(2)), "field", "world")); ensureSearchable("index-1"); for (int i = 0; i < 5; i++) { final SearchResponse r1 = client().prepareSearch("index-1") diff --git a/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java b/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java index 8a90ca0b8ca47..fc69df5987aff 100644 --- a/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java +++ b/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java @@ -28,9 +28,8 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.document.DocumentField; -import org.elasticsearch.common.joda.Joda; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.time.DateFormatters; +import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; @@ -557,7 +556,7 @@ public void testStoredFieldsWithoutSource() throws Exception { .field("long_field", 4L) .field("float_field", 5.0f) .field("double_field", 6.0d) - .field("date_field", 
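// Sketch (not part of the patch): what the "date_field" round-trips above expect. The
// ES-internal DateFormatter.forPattern("dateOptionalTime") prints ISO-8601; the JDK
// formatter below is only an illustrative stand-in for it, alongside the epoch_millis
// rendering that no longer needs the Joda fallback:
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;

class DocValueFormatSketch {
    static String dateOptionalTime(ZonedDateTime date) {
        return date.format(DateTimeFormatter.ISO_OFFSET_DATE_TIME);
    }

    static String epochMillis(ZonedDateTime date) {
        // epoch_millis renders the instant as a plain base-10 millisecond count.
        return Long.toString(date.toInstant().toEpochMilli());
    }
}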
DateFormatters.forPattern("dateOptionalTime").format(date)) + .field("date_field", DateFormatter.forPattern("dateOptionalTime").format(date)) .field("boolean_field", true) .field("binary_field", Base64.getEncoder().encodeToString("testing text".getBytes("UTF-8"))) .endObject()).get(); @@ -589,7 +588,7 @@ public void testStoredFieldsWithoutSource() throws Exception { assertThat(searchHit.getFields().get("long_field").getValue(), equalTo((Object) 4L)); assertThat(searchHit.getFields().get("float_field").getValue(), equalTo((Object) 5.0f)); assertThat(searchHit.getFields().get("double_field").getValue(), equalTo((Object) 6.0d)); - String dateTime = DateFormatters.forPattern("dateOptionalTime").format(date); + String dateTime = DateFormatter.forPattern("dateOptionalTime").format(date); assertThat(searchHit.getFields().get("date_field").getValue(), equalTo((Object) dateTime)); assertThat(searchHit.getFields().get("boolean_field").getValue(), equalTo((Object) Boolean.TRUE)); assertThat(searchHit.getFields().get("binary_field").getValue(), equalTo(new BytesArray("testing text" .getBytes("UTF8")))); @@ -769,7 +768,7 @@ public void testDocValueFields() throws Exception { .field("long_field", 4L) .field("float_field", 5.0f) .field("double_field", 6.0d) - .field("date_field", DateFormatters.forPattern("dateOptionalTime").format(date)) + .field("date_field", DateFormatter.forPattern("dateOptionalTime").format(date)) .field("boolean_field", true) .field("binary_field", new byte[] {42, 100}) .field("ip_field", "::1") @@ -869,7 +868,7 @@ public void testDocValueFields() throws Exception { assertThat(searchResponse.getHits().getAt(0).getFields().get("float_field").getValue(), equalTo((Object) 5.0)); assertThat(searchResponse.getHits().getAt(0).getFields().get("double_field").getValue(), equalTo((Object) 6.0d)); assertThat(searchResponse.getHits().getAt(0).getFields().get("date_field").getValue(), - equalTo(DateFormatters.forPattern("dateOptionalTime").format(date))); + equalTo(DateFormatter.forPattern("dateOptionalTime").format(date))); assertThat(searchResponse.getHits().getAt(0).getFields().get("boolean_field").getValue(), equalTo((Object) true)); assertThat(searchResponse.getHits().getAt(0).getFields().get("text_field").getValue(), equalTo("foo")); assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValue(), equalTo("foo")); @@ -898,9 +897,8 @@ public void testDocValueFields() throws Exception { assertThat(searchResponse.getHits().getAt(0).getFields().get("long_field").getValue(), equalTo("4.0")); assertThat(searchResponse.getHits().getAt(0).getFields().get("float_field").getValue(), equalTo("5.0")); assertThat(searchResponse.getHits().getAt(0).getFields().get("double_field").getValue(), equalTo("6.0")); - // TODO: switch to java date formatter, but will require special casing java 8 as there is a bug with epoch formatting there assertThat(searchResponse.getHits().getAt(0).getFields().get("date_field").getValue(), - equalTo(Joda.forPattern("epoch_millis").format(date))); + equalTo(DateFormatter.forPattern("epoch_millis").format(date))); } public void testScriptFields() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java index 0c2b0829c5f75..b7d51798ab7df 100644 --- a/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java +++ b/server/src/test/java/org/elasticsearch/search/query/SearchQueryIT.java @@ -25,9 +25,11 @@ import 
org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.bootstrap.JavaVersion; import org.elasticsearch.common.Strings; import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; @@ -50,11 +52,10 @@ import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; -import org.joda.time.DateTime; -import org.joda.time.DateTimeZone; -import org.joda.time.format.ISODateTimeFormat; import java.io.IOException; +import java.time.Instant; +import java.time.ZoneId; import java.time.ZoneOffset; import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; @@ -433,14 +434,14 @@ public void testDateRangeInQueryStringWithTimeZone_7880() { "type", "past", "type=date" )); - DateTimeZone timeZone = randomDateTimeZone(); - String now = ISODateTimeFormat.dateTime().print(new DateTime(timeZone)); - logger.info(" --> Using time_zone [{}], now is [{}]", timeZone.getID(), now); + ZoneId timeZone = randomZone(); + String now = DateFormatter.forPattern("strict_date_optional_time").format(Instant.now().atZone(timeZone)); + logger.info(" --> Using time_zone [{}], now is [{}]", timeZone.getId(), now); client().prepareIndex("test", "type", "1").setSource("past", now).get(); refresh(); SearchResponse searchResponse = client().prepareSearch().setQuery(queryStringQuery("past:[now-1m/m TO now+1m/m]") - .timeZone(timeZone.getID())).get(); + .timeZone(timeZone.getId())).get(); assertHitCount(searchResponse, 1L); } @@ -1555,21 +1556,20 @@ public void testQueryStringWithSlopAndFields() { } } - public void testDateProvidedAsNumber() throws ExecutionException, InterruptedException { + public void testDateProvidedAsNumber() throws InterruptedException { createIndex("test"); assertAcked(client().admin().indices().preparePutMapping("test").setType("type") - .setSource("field", "type=date,format=epoch_millis").get()); - indexRandom(true, client().prepareIndex("test", "type", "1").setSource("field", -1000000000001L), - client().prepareIndex("test", "type", "2").setSource("field", -1000000000000L), - client().prepareIndex("test", "type", "3").setSource("field", -999999999999L), - client().prepareIndex("test", "type", "4").setSource("field", -1000000000001.0123456789), - client().prepareIndex("test", "type", "5").setSource("field", -1000000000000.0123456789), - client().prepareIndex("test", "type", "6").setSource("field", -999999999999.0123456789)); - - - assertHitCount(client().prepareSearch("test").setSize(0).setQuery(rangeQuery("field").lte(-1000000000000L)).get(), 4); - assertHitCount(client().prepareSearch("test").setSize(0).setQuery(rangeQuery("field").lte(-999999999999L)).get(), 6); - + .setSource("field", "type=date,format=epoch_millis").get()); + indexRandom(true, + client().prepareIndex("test", "type", "1").setSource("field", 1000000000001L), + client().prepareIndex("test", "type", "2").setSource("field", 1000000000000L), + client().prepareIndex("test", "type", "3").setSource("field", 999999999999L), + client().prepareIndex("test", "type", "4").setSource("field", 1000000000002L), + 
client().prepareIndex("test", "type", "5").setSource("field", 1000000000003L), + client().prepareIndex("test", "type", "6").setSource("field", 999999999999L)); + + assertHitCount(client().prepareSearch("test").setSize(0).setQuery(rangeQuery("field").gte(1000000000000L)).get(), 4); + assertHitCount(client().prepareSearch("test").setSize(0).setQuery(rangeQuery("field").gte(999999999999L)).get(), 6); } public void testRangeQueryWithTimeZone() throws Exception { @@ -1582,7 +1582,7 @@ public void testRangeQueryWithTimeZone() throws Exception { client().prepareIndex("test", "type1", "3").setSource("date", "2014-01-01T01:00:00", "num", 3), // Now in UTC+1 client().prepareIndex("test", "type1", "4") - .setSource("date", DateTime.now(DateTimeZone.forOffsetHours(1)).getMillis(), "num", 4)); + .setSource("date", Instant.now().atZone(ZoneOffset.ofHours(1)).toInstant().toEpochMilli(), "num", 4)); SearchResponse searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.rangeQuery("date").from("2014-01-01T00:00:00").to("2014-01-01T00:59:00")) @@ -1634,12 +1634,6 @@ public void testRangeQueryWithTimeZone() throws Exception { assertHitCount(searchResponse, 1L); assertThat(searchResponse.getHits().getAt(0).getId(), is("3")); - // When we use long values, it means we have ms since epoch UTC based so we don't apply any transformation - expectThrows(SearchPhaseExecutionException.class, () -> - client().prepareSearch("test") - .setQuery(QueryBuilders.rangeQuery("date").from(1388534400000L).to(1388537940999L).timeZone("+01:00")) - .get()); - searchResponse = client().prepareSearch("test") .setQuery(QueryBuilders.rangeQuery("date").from("2014-01-01").to("2014-01-01T00:59:00").timeZone("-01:00")) .get(); @@ -1653,6 +1647,36 @@ public void testRangeQueryWithTimeZone() throws Exception { assertThat(searchResponse.getHits().getAt(0).getId(), is("4")); } + public void testRangeQueryWithLocaleMapping() throws Exception { + assumeTrue("need java 9 for testing", JavaVersion.current().compareTo(JavaVersion.parse("9")) >= 0); + + assertAcked(prepareCreate("test") + .addMapping("type1", jsonBuilder().startObject().startObject("properties").startObject("date_field") + .field("type", "date") + .field("format", "E, d MMM yyyy HH:mm:ss Z") + .field("locale", "de") + .endObject().endObject().endObject())); + + indexRandom(true, + client().prepareIndex("test", "type1", "1").setSource("date_field", "Mi., 06 Dez. 2000 02:55:00 -0800"), + client().prepareIndex("test", "type1", "2").setSource("date_field", "Do., 07 Dez. 2000 02:55:00 -0800") + ); + + SearchResponse searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("date_field") + .gte("Di., 05 Dez. 2000 02:55:00 -0800") + .lte("Do., 07 Dez. 2000 00:00:00 -0800")) + .get(); + assertHitCount(searchResponse, 1L); + + searchResponse = client().prepareSearch("test") + .setQuery(QueryBuilders.rangeQuery("date_field") + .gte("Di., 05 Dez. 2000 02:55:00 -0800") + .lte("Fr., 08 Dez. 
2000 00:00:00 -0800")) + .get(); + assertHitCount(searchResponse, 2L); + } + public void testSearchEmptyDoc() { assertAcked(prepareCreate("test").setSettings("{\"index.analysis.analyzer.default.type\":\"keyword\"}", XContentType.JSON)); client().prepareIndex("test", "type1", "1").setSource("{}", XContentType.JSON).get(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java index 22ed586043d83..58107cbc7318a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractSerializingTestCase.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.xcontent.XContentType; import java.io.IOException; +import java.util.Date; import java.util.function.Predicate; import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; @@ -101,4 +102,10 @@ protected boolean assertToXContentEquivalence() { return true; } + /** + * @return a random date between 1970 and ca 2065 + */ + protected Date randomDate() { + return new Date(randomLongBetween(0, 3000000000000L)); + } } diff --git a/x-pack/license-tools/src/test/java/org/elasticsearch/license/licensor/TestUtils.java b/x-pack/license-tools/src/test/java/org/elasticsearch/license/licensor/TestUtils.java index 3f88ce13f697d..476ba8d26bb0a 100644 --- a/x-pack/license-tools/src/test/java/org/elasticsearch/license/licensor/TestUtils.java +++ b/x-pack/license-tools/src/test/java/org/elasticsearch/license/licensor/TestUtils.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.nio.file.Path; +import java.time.ZoneOffset; import java.util.UUID; import static com.carrotsearch.randomizedtesting.RandomizedTest.randomBoolean; @@ -34,9 +35,8 @@ public class TestUtils { public static final String PUBLIC_KEY_RESOURCE = "/public.key"; public static final String PRIVATE_KEY_RESOURCE = "/private.key"; - private static final DateFormatter formatDateTimeFormatter = - DateFormatter.forPattern("yyyy-MM-dd"); - private static final DateMathParser dateMathParser = formatDateTimeFormatter.toDateMathParser(); + private static final DateFormatter dateFormatter = DateFormatter.forPattern("yyyy-MM-dd"); + private static final DateMathParser dateMathParser = dateFormatter.toDateMathParser(); public static String dumpLicense(License license) throws Exception { XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); @@ -49,11 +49,11 @@ public static String dumpLicense(License license) throws Exception { } public static String dateMathString(String time, final long now) { - return formatDateTimeFormatter.formatMillis(dateMathParser.parse(time, () -> now)); + return dateFormatter.format(dateMathParser.parse(time, () -> now).atZone(ZoneOffset.UTC)); } public static long dateMath(String time, final long now) { - return dateMathParser.parse(time, () -> now); + return dateMathParser.parse(time, () -> now).toEpochMilli(); } public static LicenseSpec generateRandomLicenseSpec(int version) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DateUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DateUtils.java index 2769b76b65f10..9cec2b4cc7c1f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DateUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/DateUtils.java @@ -5,39 +5,37 @@ */ package org.elasticsearch.license; +import 
org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.time.DateFormatter; -import org.joda.time.MutableDateTime; -import org.joda.time.format.DateTimeFormatter; -import org.joda.time.format.ISODateTimeFormat; +import org.elasticsearch.common.time.DateFormatters; import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.time.temporal.ChronoField; public class DateUtils { private static final DateFormatter dateOnlyFormatter = DateFormatter.forPattern("yyyy-MM-dd").withZone(ZoneOffset.UTC); - - private static final DateTimeFormatter dateTimeFormatter = ISODateTimeFormat.dateTime().withZoneUTC(); + private static final DateFormatter dateTimeFormatter = DateFormatter.forPattern("strict_date_time").withZone(ZoneOffset.UTC); public static long endOfTheDay(String date) { try { // Try parsing using complete date/time format - return dateTimeFormatter.parseDateTime(date).getMillis(); - } catch (IllegalArgumentException ex) { - // Fall back to the date only format - MutableDateTime dateTime = new MutableDateTime(dateOnlyFormatter.parseMillis(date)); - dateTime.millisOfDay().set(dateTime.millisOfDay().getMaximumValue()); - return dateTime.getMillis(); + return dateTimeFormatter.parseMillis(date); + } catch (ElasticsearchParseException | IllegalArgumentException ex) { + // Fall back to the date only format; ZonedDateTime is immutable, so keep the adjusted copy + ZonedDateTime dateTime = DateFormatters.toZonedDateTime(dateOnlyFormatter.parse(date)); + dateTime = dateTime.with(ChronoField.MILLI_OF_DAY, ChronoField.MILLI_OF_DAY.range().getMaximum()); + return dateTime.toInstant().toEpochMilli(); } } public static long beginningOfTheDay(String date) { try { // Try parsing using complete date/time format - return dateTimeFormatter.parseDateTime(date).getMillis(); - } catch (IllegalArgumentException ex) { + return dateTimeFormatter.parseMillis(date); + } catch (ElasticsearchParseException | IllegalArgumentException ex) { // Fall back to the date only format - return dateOnlyFormatter.parseMillis(date); + return DateFormatters.toZonedDateTime(dateOnlyFormatter.parse(date)).toInstant().toEpochMilli(); } - } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsAction.java index ef284e1394205..15531fce7a1ca 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetOverallBucketsAction.java @@ -91,7 +91,7 @@ static long parseDateOrThrow(String date, ParseField paramName, LongSupplier now DateMathParser dateMathParser = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.toDateMathParser(); try { - return dateMathParser.parse(date, now); + return dateMathParser.parse(date, now).toEpochMilli(); } catch (Exception e) { String msg = Messages.getMessage(Messages.REST_INVALID_DATETIME_PARAMS, paramName.getPreferredName(), date); throw new ElasticsearchParseException(msg, e); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java index d33280dcac3d2..12faa157eeb9a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java @@ -162,7 +162,7 @@ static long parseDateOrThrow(String date, ParseField 
paramName, LongSupplier now DateMathParser dateMathParser = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.toDateMathParser(); try { - return dateMathParser.parse(date, now); + return dateMathParser.parse(date, now).toEpochMilli(); } catch (Exception e) { String msg = Messages.getMessage(Messages.REST_INVALID_DATETIME_PARAMS, paramName.getPreferredName(), date); throw new ElasticsearchParseException(msg, e); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtils.java index f0ba07ad15c68..bb1faeddd8298 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtils.java @@ -10,7 +10,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.ShardSearchFailure; -import org.elasticsearch.common.rounding.DateTimeUnit; +import org.elasticsearch.common.Rounding; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; @@ -22,9 +22,9 @@ import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneOffset; import java.util.Arrays; import java.util.Collection; import java.util.concurrent.TimeUnit; @@ -128,7 +128,7 @@ public static long getHistogramIntervalMillis(AggregationBuilder histogramAggreg * an {@link ElasticsearchException} with the validation error */ private static long validateAndGetDateHistogramInterval(DateHistogramAggregationBuilder dateHistogram) { - if (dateHistogram.timeZone() != null && dateHistogram.timeZone().equals(DateTimeZone.UTC) == false) { + if (dateHistogram.timeZone() != null && dateHistogram.timeZone().normalized().equals(ZoneOffset.UTC) == false) { throw ExceptionsHelper.badRequestException("ML requires date_histogram.time_zone to be UTC"); } @@ -141,7 +141,7 @@ private static long validateAndGetDateHistogramInterval(DateHistogramAggregation public static long validateAndGetCalendarInterval(String calendarInterval) { TimeValue interval; - DateTimeUnit dateTimeUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(calendarInterval); + Rounding.DateTimeUnit dateTimeUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(calendarInterval); if (dateTimeUnit != null) { switch (dateTimeUnit) { case WEEK_OF_WEEKYEAR: @@ -161,7 +161,7 @@ public static long validateAndGetCalendarInterval(String calendarInterval) { break; case MONTH_OF_YEAR: case YEAR_OF_CENTURY: - case QUARTER: + case QUARTER_OF_YEAR: throw ExceptionsHelper.badRequestException(invalidDateHistogramCalendarIntervalMessage(calendarInterval)); default: throw ExceptionsHelper.badRequestException("Unexpected dateTimeUnit [" + dateTimeUnit + "]"); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/time/TimeUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/time/TimeUtils.java index 6434b0ff2b98d..ea0994dad717c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/time/TimeUtils.java 
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/time/TimeUtils.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.core.ml.utils.time; +import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; @@ -15,6 +16,7 @@ import java.util.concurrent.TimeUnit; public final class TimeUtils { + private TimeUtils() { // Do nothing } @@ -55,7 +57,7 @@ public static long dateStringToEpoch(String date) { try { return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis(date); - } catch (IllegalArgumentException e) { + } catch (ElasticsearchParseException | IllegalArgumentException e) { } // Could not do the conversion return -1; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfig.java index 166322b93722c..f4fee8acc3d1f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfig.java @@ -9,12 +9,11 @@ import org.elasticsearch.action.fieldcaps.FieldCapabilities; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Rounding; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.rounding.DateTimeUnit; -import org.elasticsearch.common.rounding.Rounding; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; @@ -22,9 +21,9 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; import java.util.Map; import java.util.Objects; @@ -82,7 +81,7 @@ public DateHistogramGroupConfig(final String field, final DateHistogramInterval * The {@code field} and {@code interval} are required to compute the date histogram for the rolled up documents. * The {@code delay} is optional and can be set to {@code null}. It defines how long to wait before rolling up new documents. * The {@code timeZone} is optional and can be set to {@code null}. When configured, the time zone value is resolved using - * ({@link DateTimeZone#forID(String)} and must match a time zone identifier provided by the Joda Time library. + * ({@link ZoneId#of(String)} and must match a time zone identifier. *

* @param field the name of the date field to use for the date histogram (required) * @param interval the interval to use for the date histogram (required) @@ -229,23 +228,14 @@ public static DateHistogramGroupConfig fromXContent(final XContentParser parser) } private static Rounding createRounding(final String expr, final String timeZone) { - DateTimeUnit timeUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(expr); + Rounding.DateTimeUnit timeUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(expr); final Rounding.Builder rounding; if (timeUnit != null) { rounding = new Rounding.Builder(timeUnit); } else { rounding = new Rounding.Builder(TimeValue.parseTimeValue(expr, "createRounding")); } - rounding.timeZone(toDateTimeZone(timeZone)); + rounding.timeZone(ZoneId.of(timeZone)); return rounding.build(); } - - private static DateTimeZone toDateTimeZone(final String timezone) { - try { - return DateTimeZone.forOffsetHours(Integer.parseInt(timezone)); - } catch (NumberFormatException e) { - return DateTimeZone.forID(timezone); - } - } - } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherDateTimeUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherDateTimeUtils.java index f81c7955abbc4..e67baeaad3916 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherDateTimeUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/WatcherDateTimeUtils.java @@ -91,7 +91,7 @@ public static DateTime parseDateMathOrNull(String fieldName, XContentParser pars } public static DateTime parseDateMath(String valueString, DateTimeZone timeZone, final Clock clock) { - return new DateTime(dateMathParser.parse(valueString, clock::millis), timeZone); + return new DateTime(dateMathParser.parse(valueString, clock::millis).toEpochMilli(), timeZone); } public static DateTime parseDate(String fieldName, XContentParser parser, DateTimeZone timeZone) throws IOException { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/RewriteCachingDirectoryReaderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/RewriteCachingDirectoryReaderTests.java index 6812aca474749..2219a78055544 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/RewriteCachingDirectoryReaderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/index/engine/RewriteCachingDirectoryReaderTests.java @@ -15,9 +15,9 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.test.ESTestCase; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneOffset; public class RewriteCachingDirectoryReaderTests extends ESTestCase { @@ -92,15 +92,15 @@ public void testIsWithinQuery() throws IOException { dateFieldType.setName("test"); QueryRewriteContext context = new QueryRewriteContext(xContentRegistry(), writableRegistry(), null, () -> 0); MappedFieldType.Relation relation = dateFieldType.isFieldWithinQuery(cachingDirectoryReader, 0, 10, - true, true, DateTimeZone.UTC, null, context); + true, true, ZoneOffset.UTC, null, context); assertEquals(relation, MappedFieldType.Relation.WITHIN); relation = dateFieldType.isFieldWithinQuery(cachingDirectoryReader, 3, 11, - true, true, DateTimeZone.UTC, null, context); + true, true, ZoneOffset.UTC, null, context); assertEquals(relation, 
MappedFieldType.Relation.INTERSECTS); relation = dateFieldType.isFieldWithinQuery(cachingDirectoryReader, 10, 11, - false, true, DateTimeZone.UTC, null, context); + false, true, ZoneOffset.UTC, null, context); assertEquals(relation, MappedFieldType.Relation.DISJOINT); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/TestUtils.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/TestUtils.java index 230fd75fbb958..47dad7e18eb32 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/TestUtils.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/TestUtils.java @@ -29,6 +29,7 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardCopyOption; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.List; import java.util.UUID; @@ -51,11 +52,11 @@ public class TestUtils { private static final DateMathParser dateMathParser = formatDateTimeFormatter.toDateMathParser(); public static String dateMathString(String time, final long now) { - return formatDateTimeFormatter.formatMillis(dateMathParser.parse(time, () -> now)); + return formatDateTimeFormatter.format(dateMathParser.parse(time, () -> now).atZone(ZoneOffset.UTC)); } public static long dateMath(String time, final long now) { - return dateMathParser.parse(time, () -> now); + return dateMathParser.parse(time, () -> now).toEpochMilli(); } public static LicenseSpec generateRandomLicenseSpec(int version) { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java index cb2f13e804253..788870013885e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfigTests.java @@ -48,15 +48,15 @@ import org.elasticsearch.xpack.core.ml.datafeed.ChunkingConfig.Mode; import org.elasticsearch.xpack.core.ml.job.messages.Messages; import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.TimeZone; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -449,7 +449,7 @@ public void testBuild_GivenHistogramWithDefaultInterval() { public void testBuild_GivenDateHistogramWithInvalidTimeZone() { MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); DateHistogramAggregationBuilder dateHistogram = AggregationBuilders.dateHistogram("bucket").field("time") - .interval(300000L).timeZone(DateTimeZone.forTimeZone(TimeZone.getTimeZone("EST"))).subAggregation(maxTime); + .interval(300000L).timeZone(ZoneId.of("CET")).subAggregation(maxTime); ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> createDatafeedWithDateHistogram(dateHistogram)); @@ -650,7 +650,7 @@ public void testSerializationOfComplexAggs() throws IOException { new Script("params.bytes > 0 ? 
params.bytes : null")); DateHistogramAggregationBuilder dateHistogram = AggregationBuilders.dateHistogram("histogram_buckets") - .field("timestamp").interval(300000).timeZone(DateTimeZone.UTC) + .field("timestamp").interval(300000).timeZone(ZoneOffset.UTC) .subAggregation(maxTime) .subAggregation(avgAggregationBuilder) .subAggregation(derivativePipelineAggregationBuilder) @@ -701,7 +701,7 @@ public void testSerializationOfComplexAggsBetweenVersions() throws IOException { new Script("params.bytes > 0 ? params.bytes : null")); DateHistogramAggregationBuilder dateHistogram = AggregationBuilders.dateHistogram("histogram_buckets") - .field("timestamp").interval(300000).timeZone(DateTimeZone.UTC) + .field("timestamp").interval(300000).timeZone(ZoneOffset.UTC) .subAggregation(maxTime) .subAggregation(avgAggregationBuilder) .subAggregation(derivativePipelineAggregationBuilder) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtilsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtilsTests.java index 7770def0fae9a..532468216e5aa 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtilsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/extractor/ExtractorUtilsTests.java @@ -14,11 +14,12 @@ import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.MaxAggregationBuilder; import org.elasticsearch.test.ESTestCase; -import org.joda.time.DateTimeZone; -import java.util.TimeZone; +import java.time.ZoneId; +import java.time.ZoneOffset; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; public class ExtractorUtilsTests extends ESTestCase { @@ -73,13 +74,21 @@ public void testGetHistogramAggregation_MissingHistogramAgg() { public void testGetHistogramIntervalMillis_GivenDateHistogramWithInvalidTimeZone() { MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); DateHistogramAggregationBuilder dateHistogram = AggregationBuilders.dateHistogram("bucket").field("time") - .interval(300000L).timeZone(DateTimeZone.forTimeZone(TimeZone.getTimeZone("EST"))).subAggregation(maxTime); + .interval(300000L).timeZone(ZoneId.of("CET")).subAggregation(maxTime); ElasticsearchException e = expectThrows(ElasticsearchException.class, () -> ExtractorUtils.getHistogramIntervalMillis(dateHistogram)); assertThat(e.getMessage(), equalTo("ML requires date_histogram.time_zone to be UTC")); } + public void testGetHistogramIntervalMillis_GivenUtcTimeZones() { + MaxAggregationBuilder maxTime = AggregationBuilders.max("time").field("time"); + ZoneId zone = randomFrom(ZoneOffset.UTC, ZoneId.of("UTC")); + DateHistogramAggregationBuilder dateHistogram = AggregationBuilders.dateHistogram("bucket").field("time") + .interval(300000L).timeZone(zone).subAggregation(maxTime); + assertThat(ExtractorUtils.getHistogramIntervalMillis(dateHistogram), is(300_000L)); + } + public void testIsHistogram() { assertTrue(ExtractorUtils.isHistogram(AggregationBuilders.dateHistogram("time"))); assertTrue(ExtractorUtils.isHistogram(AggregationBuilders.histogram("time"))); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java index d892eb550a17a..605ea6e901a90 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/ConfigTestHelpers.java @@ -28,7 +28,7 @@ import static com.carrotsearch.randomizedtesting.generators.RandomNumbers.randomIntBetween; import static com.carrotsearch.randomizedtesting.generators.RandomPicks.randomFrom; import static com.carrotsearch.randomizedtesting.generators.RandomStrings.randomAsciiAlphanumOfLengthBetween; -import static org.elasticsearch.test.ESTestCase.randomDateTimeZone; +import static org.elasticsearch.test.ESTestCase.randomZone; public class ConfigTestHelpers { @@ -71,7 +71,7 @@ public static DateHistogramGroupConfig randomDateHistogramGroupConfig(final Rand final String field = randomField(random); final DateHistogramInterval interval = randomInterval(); final DateHistogramInterval delay = random.nextBoolean() ? randomInterval() : null; - final String timezone = random.nextBoolean() ? randomDateTimeZone().toString() : null; + String timezone = random.nextBoolean() ? randomZone().getId() : null; return new DateHistogramGroupConfig(field, interval, delay, timezone); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfigSerializingTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfigSerializingTests.java index 415e1a00a60cf..95df682ff5e14 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfigSerializingTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/rollup/job/DateHistogramGroupConfigSerializingTests.java @@ -14,9 +14,9 @@ import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.test.AbstractSerializingTestCase; import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; -import org.joda.time.DateTimeZone; import java.io.IOException; +import java.time.ZoneId; import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -155,28 +155,28 @@ public void testBwcSerialization() throws IOException { DateHistogramInterval interval = new DateHistogramInterval(in); String field = in.readString(); DateHistogramInterval delay = in.readOptionalWriteable(DateHistogramInterval::new); - DateTimeZone timeZone = in.readTimeZone(); + ZoneId timeZone = in.readZoneId(); - assertEqualInstances(reference, new DateHistogramGroupConfig(field, interval, delay, timeZone.getID())); + assertEqualInstances(reference, new DateHistogramGroupConfig(field, interval, delay, timeZone.getId())); } for (int runs = 0; runs < NUMBER_OF_TEST_RUNS; runs++) { final String field = ConfigTestHelpers.randomField(random()); final DateHistogramInterval interval = ConfigTestHelpers.randomInterval(); final DateHistogramInterval delay = randomBoolean() ? 
ConfigTestHelpers.randomInterval() : null; - final DateTimeZone timezone = randomDateTimeZone(); + final ZoneId timezone = randomZone(); // previous way to serialize a DateHistogramGroupConfig final BytesStreamOutput out = new BytesStreamOutput(); interval.writeTo(out); out.writeString(field); out.writeOptionalWriteable(delay); - out.writeTimeZone(timezone); + out.writeZoneId(timezone); final StreamInput in = out.bytes().streamInput(); DateHistogramGroupConfig deserialized = new DateHistogramGroupConfig(in); - assertEqualInstances(new DateHistogramGroupConfig(field, interval, delay, timezone.getID()), deserialized); + assertEqualInstances(new DateHistogramGroupConfig(field, interval, delay, timezone.getId()), deserialized); } } } diff --git a/x-pack/plugin/ml/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java b/x-pack/plugin/ml/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java index 1453f59ed43e4..454a3eb06e5a7 100644 --- a/x-pack/plugin/ml/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java +++ b/x-pack/plugin/ml/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/ml/transforms/PainlessDomainSplitIT.java @@ -13,8 +13,10 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xpack.ml.MachineLearning; -import org.joda.time.DateTime; +import java.time.Clock; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; import java.util.ArrayList; import java.util.HashMap; import java.util.Map; @@ -266,7 +268,7 @@ public void testHRDSplit() throws Exception { "\"time\": { \"type\": \"date\" } }"); // Index some data - DateTime baseTime = new DateTime().minusYears(1); + ZonedDateTime baseTime = ZonedDateTime.now(Clock.systemDefaultZone()).minusYears(1); TestConfiguration test = tests.get(randomInt(tests.size()-1)); // domainSplit() tests had subdomain, testHighestRegisteredDomainCases() did not, so we need a special case for sub @@ -276,18 +278,20 @@ public void testHRDSplit() throws Exception { for (int i = 0; i < 100; i++) { - DateTime time = baseTime.plusHours(i); + ZonedDateTime time = baseTime.plusHours(i); if (i == 64) { // Anomaly has 100 docs, but we don't care about the value for (int j = 0; j < 100; j++) { - Request createDocRequest = new Request("PUT", "/painless/_doc/" + time.toDateTimeISO() + "_" + j); - createDocRequest.setJsonEntity("{\"domain\": \"" + "bar.bar.com\", \"time\": \"" + time.toDateTimeISO() + "\"}"); + String formattedTime = time.format(DateTimeFormatter.ISO_DATE_TIME); + Request createDocRequest = new Request("PUT", "/painless/_doc/" + formattedTime + "_" + j); + createDocRequest.setJsonEntity("{\"domain\": \"" + "bar.bar.com\", \"time\": \"" + formattedTime + "\"}"); client().performRequest(createDocRequest); } } else { // Non-anomalous values will be what's seen when the anomaly is reported - Request createDocRequest = new Request("PUT", "/painless/_doc/" + time.toDateTimeISO()); - createDocRequest.setJsonEntity("{\"domain\": \"" + test.hostName + "\", \"time\": \"" + time.toDateTimeISO() + "\"}"); + String formattedTime = time.format(DateTimeFormatter.ISO_DATE_TIME); + Request createDocRequest = new Request("PUT", "/painless/_doc/" + formattedTime); + createDocRequest.setJsonEntity("{\"domain\": \"" + test.hostName + "\", \"time\": \"" + formattedTime + "\"}"); client().performRequest(createDocRequest); } } diff --git 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java index 190933b1e9316..5b9852ba4fddc 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java @@ -16,9 +16,9 @@ import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.action.DeleteExpiredDataAction; -import org.joda.time.DateTime; -import org.joda.time.chrono.ISOChronology; +import java.time.Clock; +import java.time.ZonedDateTime; import java.util.Objects; import java.util.Random; import java.util.concurrent.ScheduledFuture; @@ -70,9 +70,14 @@ public MlDailyMaintenanceService(ClusterName clusterName, ThreadPool threadPool, private static TimeValue delayToNextTime(ClusterName clusterName) { Random random = new Random(clusterName.hashCode()); int minutesOffset = random.ints(0, MAX_TIME_OFFSET_MINUTES).findFirst().getAsInt(); - DateTime now = DateTime.now(ISOChronology.getInstance()); - DateTime next = now.plusDays(1).withTimeAtStartOfDay().plusMinutes(30).plusMinutes(minutesOffset); - return TimeValue.timeValueMillis(next.getMillis() - now.getMillis()); + + ZonedDateTime now = ZonedDateTime.now(Clock.systemDefaultZone()); + ZonedDateTime next = now.plusDays(1) + .toLocalDate() + .atStartOfDay(now.getZone()) + .plusMinutes(30) + .plusMinutes(minutesOffset); + return TimeValue.timeValueMillis(next.toInstant().toEpochMilli() - now.toInstant().toEpochMilli()); } public void start() { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java index 35878f1199586..85f2489e6b0e5 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedJob.java @@ -128,7 +128,6 @@ Long runLookBack(long startTime, Long endTime) throws Exception { auditor.info(jobId, msg); LOGGER.info("[{}] {}", jobId, msg); - FlushJobAction.Request request = new FlushJobAction.Request(jobId); request.setCalcInterim(true); run(lookbackStartTimeMs, lookbackEnd, request); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DatafeedDelayedDataDetector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DatafeedDelayedDataDetector.java index 86fe439ac16cb..f8fa3b1874808 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DatafeedDelayedDataDetector.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/delayeddatacheck/DatafeedDelayedDataDetector.java @@ -17,11 +17,11 @@ import org.elasticsearch.xpack.core.ml.action.GetBucketsAction; import org.elasticsearch.xpack.core.ml.action.util.PageParams; import org.elasticsearch.xpack.core.ml.datafeed.extractor.ExtractorUtils; -import org.elasticsearch.xpack.ml.datafeed.delayeddatacheck.DelayedDataDetectorFactory.BucketWithMissingData; import org.elasticsearch.xpack.core.ml.job.results.Bucket; import org.elasticsearch.xpack.core.ml.utils.Intervals; -import org.joda.time.DateTime; +import org.elasticsearch.xpack.ml.datafeed.delayeddatacheck.DelayedDataDetectorFactory.BucketWithMissingData; +import 
java.time.ZonedDateTime; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -131,8 +131,8 @@ private Map checkCurrentBucketEventCount(long start, long end) { } private static long toHistogramKeyToEpoch(Object key) { - if (key instanceof DateTime) { - return ((DateTime)key).getMillis(); + if (key instanceof ZonedDateTime) { + return ((ZonedDateTime)key).toInstant().toEpochMilli(); } else if (key instanceof Double) { return ((Double)key).longValue(); } else if (key instanceof Long){ diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationToJsonProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationToJsonProcessor.java index db8dea22675f2..8cf3ed39651d6 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationToJsonProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationToJsonProcessor.java @@ -21,10 +21,10 @@ import org.elasticsearch.search.aggregations.metrics.Percentiles; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.job.messages.Messages; -import org.joda.time.DateTime; import java.io.IOException; import java.io.OutputStream; +import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; @@ -210,15 +210,15 @@ private void processDateHistogram(Histogram agg) throws IOException { } /* - * Date Histograms have a {@link DateTime} object as the key, + * Date Histograms have a {@link ZonedDateTime} object as the key, * Histograms have either a Double or Long. */ private long toHistogramKeyToEpoch(Object key) { - if (key instanceof DateTime) { - return ((DateTime)key).getMillis(); + if (key instanceof ZonedDateTime) { + return ((ZonedDateTime)key).toInstant().toEpochMilli(); } else if (key instanceof Double) { return ((Double)key).longValue(); - } else if (key instanceof Long){ + } else if (key instanceof Long) { return (Long)key; } else { throw new IllegalStateException("Histogram key [" + key + "] cannot be converted to a timestamp"); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedField.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedField.java index 232cd53a359ce..4223bff49825e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedField.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedField.java @@ -8,7 +8,6 @@ import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; -import org.joda.time.base.BaseDateTime; import java.util.List; import java.util.Map; @@ -112,8 +111,6 @@ public Object[] value(SearchHit hit) { } if (value[0] instanceof String) { // doc_value field with the epoch_millis format value[0] = Long.parseLong((String) value[0]); - } else if (value[0] instanceof BaseDateTime) { // script field - value[0] = ((BaseDateTime) value[0]).getMillis(); } else if (value[0] instanceof Long == false) { // pre-6.0 field throw new IllegalStateException("Unexpected value for a time field: " + value[0].getClass()); } diff --git 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/overallbuckets/OverallBucketsProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/overallbuckets/OverallBucketsProvider.java index 204ae42720433..dd9a6229ec887 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/overallbuckets/OverallBucketsProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/overallbuckets/OverallBucketsProvider.java @@ -14,8 +14,8 @@ import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.job.results.OverallBucket; import org.elasticsearch.xpack.core.ml.job.results.Result; -import org.joda.time.DateTime; +import java.time.ZonedDateTime; import java.util.ArrayList; import java.util.Date; import java.util.List; @@ -64,8 +64,8 @@ public List<OverallBucket> computeOverallBuckets(Histogram histogram) { } private static Date getHistogramBucketTimestamp(Histogram.Bucket bucket) { - DateTime bucketTimestamp = (DateTime) bucket.getKey(); - return new Date(bucketTimestamp.getMillis()); + ZonedDateTime bucketTimestamp = (ZonedDateTime) bucket.getKey(); + return new Date(bucketTimestamp.toInstant().toEpochMilli()); } static class TopNScores extends PriorityQueue<Double> { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java index b595c564ab9aa..be50114fc46e0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/AbstractExpiredJobDataRemover.java @@ -15,9 +15,9 @@ import org.elasticsearch.xpack.core.ml.job.results.Result; import org.elasticsearch.xpack.ml.job.persistence.BatchedJobsIterator; import org.elasticsearch.xpack.ml.utils.VolatileCursorIterator; -import org.joda.time.DateTime; -import org.joda.time.chrono.ISOChronology; +import java.time.Clock; +import java.time.Instant; import java.util.Deque; import java.util.Iterator; import java.util.List; @@ -71,7 +71,7 @@ private WrappedBatchedJobsIterator newJobIterator() { } private long calcCutoffEpochMs(long retentionDays) { - long nowEpochMs = DateTime.now(ISOChronology.getInstance()).getMillis(); + long nowEpochMs = Instant.now(Clock.systemDefaultZone()).toEpochMilli(); return nowEpochMs - new TimeValue(retentionDays, TimeUnit.DAYS).getMillis(); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredForecastsRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredForecastsRemover.java index 3de9795deb335..3225a7eb9212e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredForecastsRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredForecastsRemover.java @@ -35,11 +35,11 @@ import org.elasticsearch.xpack.core.ml.job.results.ForecastRequestStats; import org.elasticsearch.xpack.core.ml.job.results.Result; import org.elasticsearch.xpack.ml.MachineLearning; -import org.joda.time.DateTime; -import org.joda.time.chrono.ISOChronology; import java.io.IOException; import java.io.InputStream; +import java.time.Clock; +import java.time.Instant; import java.util.ArrayList; import java.util.List; import java.util.Objects; @@ -66,7 +66,7 @@ public class 
ExpiredForecastsRemover implements MlDataRemover { public ExpiredForecastsRemover(Client client, ThreadPool threadPool) { this.client = Objects.requireNonNull(client); this.threadPool = Objects.requireNonNull(threadPool); - this.cutoffEpochMs = DateTime.now(ISOChronology.getInstance()).getMillis(); + this.cutoffEpochMs = Instant.now(Clock.systemDefaultZone()).toEpochMilli(); } @Override diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedFieldTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedFieldTests.java index 1e5e6fa652db1..ad999daafb254 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedFieldTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/ExtractedFieldTests.java @@ -8,9 +8,7 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.ml.datafeed.extractor.fields.ExtractedField; import org.elasticsearch.xpack.ml.test.SearchHitBuilder; -import org.joda.time.DateTime; import java.util.Arrays; @@ -98,16 +96,16 @@ public void testNewTimeFieldGivenSource() { expectThrows(IllegalArgumentException.class, () -> ExtractedField.newTimeField("time", ExtractedField.ExtractionMethod.SOURCE)); } - public void testValueGivenTimeField() { + public void testValueGivenStringTimeField() { final long millis = randomLong(); - final SearchHit hit = new SearchHitBuilder(randomInt()).addField("time", new DateTime(millis)).build(); + final SearchHit hit = new SearchHitBuilder(randomInt()).addField("time", Long.toString(millis)).build(); final ExtractedField timeField = ExtractedField.newTimeField("time", ExtractedField.ExtractionMethod.DOC_VALUE); assertThat(timeField.value(hit), equalTo(new Object[] { millis })); } - public void testValueGivenStringTimeField() { + public void testValueGivenLongTimeField() { final long millis = randomLong(); - final SearchHit hit = new SearchHitBuilder(randomInt()).addField("time", Long.toString(millis)).build(); + final SearchHit hit = new SearchHitBuilder(randomInt()).addField("time", millis).build(); final ExtractedField timeField = ExtractedField.newTimeField("time", ExtractedField.ExtractionMethod.DOC_VALUE); assertThat(timeField.value(hit), equalTo(new Object[] { millis })); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/TimeBasedExtractedFieldsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/TimeBasedExtractedFieldsTests.java index 1fd6db3de566a..20dd49029b3ea 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/TimeBasedExtractedFieldsTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/extractor/fields/TimeBasedExtractedFieldsTests.java @@ -17,7 +17,6 @@ import org.elasticsearch.xpack.core.ml.job.config.Detector; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.ml.test.SearchHitBuilder; -import org.joda.time.DateTime; import java.util.Arrays; import java.util.Collections; @@ -64,13 +63,6 @@ public void testAllTypesOfFields() { assertThat(extractedFields.getSourceFields(), equalTo(new String[] {"src1", "src2"})); } - public void testTimeFieldValue() { - long millis = randomLong(); - SearchHit hit = new 
SearchHitBuilder(randomInt()).addField("time", new DateTime(millis)).build(); - TimeBasedExtractedFields extractedFields = new TimeBasedExtractedFields(timeField, Collections.singletonList(timeField)); - assertThat(extractedFields.timeFieldValue(hit), equalTo(millis)); - } - public void testStringTimeFieldValue() { long millis = randomLong(); SearchHit hit = new SearchHitBuilder(randomInt()).addField("time", Long.toString(millis)).build(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinderTests.java index 6e256680eca55..f6f75fe722dac 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/filestructurefinder/TimestampFormatFinderTests.java @@ -6,7 +6,7 @@ package org.elasticsearch.xpack.ml.filestructurefinder; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.time.DateFormatters; +import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.xpack.ml.filestructurefinder.TimestampFormatFinder.TimestampMatch; import java.util.Arrays; @@ -316,7 +316,7 @@ private void validateJavaTimestampFormats(List<String> javaTimestampFormats, Str String timestampFormat = javaTimestampFormats.get(i); switch (timestampFormat) { case "ISO8601": - parsed = DateFormatters.forPattern("strict_date_optional_time_nanos").withZone(defaultZone).parse(text); + parsed = DateFormatter.forPattern("strict_date_optional_time_nanos").withZone(defaultZone).parse(text); break; default: java.time.format.DateTimeFormatter parser = new java.time.format.DateTimeFormatterBuilder() diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectResultProcessorIT.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectResultProcessorIT.java index 505a2b871da0b..ee331a99006ed 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectResultProcessorIT.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/integration/AutodetectResultProcessorIT.java @@ -289,11 +289,16 @@ private Bucket createBucket(boolean isInterim) { return bucket; } + private Date randomDate() { + // between 1970 and 2065 + return new Date(randomLongBetween(0, 3000000000000L)); + } + private List<AnomalyRecord> createRecords(boolean isInterim) { List<AnomalyRecord> records = new ArrayList<>(); int count = randomIntBetween(0, 100); - Date now = new Date(randomNonNegativeLong()); + Date now = randomDate(); for (int i=0; i<count; i++) { @@ -48,7 +47,7 @@ protected AutodetectResult createTestInstance() { FlushAcknowledgement flushAcknowledgement; String jobId = "foo"; if (randomBoolean()) { - bucket = new Bucket(jobId, new Date(randomNonNegativeLong()), randomNonNegativeLong()); + bucket = new Bucket(jobId, randomDate(), randomNonNegativeLong()); } else { bucket = null; } @@ -56,7 +55,7 @@ protected AutodetectResult createTestInstance() { int size = randomInt(10); records = new ArrayList<>(size); for (int i = 0; i < size; i++) { - AnomalyRecord record = new AnomalyRecord(jobId, new Date(randomLong()), randomNonNegativeLong()); + AnomalyRecord record = new AnomalyRecord(jobId, randomDate(), randomNonNegativeLong()); record.setProbability(randomDoubleBetween(0.0, 1.0, true)); records.add(record); } @@ -67,7 +66,7 @@ protected AutodetectResult createTestInstance() { 
influencers = new ArrayList<>(size); for (int i = 0; i < size; i++) { Influencer influencer = new Influencer(jobId, randomAlphaOfLength(10), randomAlphaOfLength(10), - new Date(randomNonNegativeLong()), randomNonNegativeLong()); + randomDate(), randomNonNegativeLong()); influencer.setProbability(randomDoubleBetween(0.0, 1.0, true)); influencers.add(influencer); } @@ -89,12 +88,13 @@ protected AutodetectResult createTestInstance() { modelSizeStats = null; } if (randomBoolean()) { - modelPlot = new ModelPlot(jobId, new Date(randomLong()), randomNonNegativeLong(), randomInt()); + modelPlot = new ModelPlot(jobId, randomDate(), randomNonNegativeLong(), randomInt()); } else { modelPlot = null; } if (randomBoolean()) { - forecast = new Forecast(jobId, randomAlphaOfLength(20), new Date(randomLong()), randomNonNegativeLong(), randomInt()); + forecast = new Forecast(jobId, randomAlphaOfLength(20), randomDate(), + randomNonNegativeLong(), randomInt()); } else { forecast = null; } @@ -110,7 +110,8 @@ protected AutodetectResult createTestInstance() { categoryDefinition = null; } if (randomBoolean()) { - flushAcknowledgement = new FlushAcknowledgement(randomAlphaOfLengthBetween(1, 20), new Date(randomNonNegativeLong())); + flushAcknowledgement = new FlushAcknowledgement(randomAlphaOfLengthBetween(1, 20), + randomDate()); } else { flushAcknowledgement = null; } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/BucketTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/BucketTests.java index 65343b0a068ac..a49ef0a5e26fa 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/BucketTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/BucketTests.java @@ -33,7 +33,7 @@ public Bucket createTestInstance() { } public Bucket createTestInstance(String jobId) { - Bucket bucket = new Bucket(jobId, new Date(randomNonNegativeLong()), randomNonNegativeLong()); + Bucket bucket = new Bucket(jobId, randomDate(), randomNonNegativeLong()); if (randomBoolean()) { bucket.setAnomalyScore(randomDouble()); } @@ -92,7 +92,7 @@ protected Bucket doParseInstance(XContentParser parser) { } public void testEquals_GivenDifferentClass() { - Bucket bucket = new Bucket("foo", new Date(randomLong()), randomNonNegativeLong()); + Bucket bucket = new Bucket("foo", randomDate(), randomNonNegativeLong()); assertFalse(bucket.equals("a string")); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/ForecastTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/ForecastTests.java index b1d9f37dcb4f2..a5c15716ea293 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/ForecastTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/ForecastTests.java @@ -26,7 +26,7 @@ protected Forecast createTestInstance() { public Forecast createTestInstance(String jobId) { Forecast forecast = - new Forecast(jobId, randomAlphaOfLength(20), new Date(randomLong()), + new Forecast(jobId, randomAlphaOfLength(20), randomDate(), randomNonNegativeLong(), randomInt()); if (randomBoolean()) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/ModelPlotTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/ModelPlotTests.java index 2a5ceb8363b8a..37788bfa203d2 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/ModelPlotTests.java +++ 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/ModelPlotTests.java @@ -30,7 +30,7 @@ protected ModelPlot createTestInstance() { public ModelPlot createTestInstance(String jobId) { ModelPlot modelPlot = - new ModelPlot(jobId, new Date(randomLong()), randomNonNegativeLong(), randomInt()); + new ModelPlot(jobId, randomDate(), randomNonNegativeLong(), randomInt()); if (randomBoolean()) { modelPlot.setByFieldName(randomAlphaOfLengthBetween(1, 20)); } @@ -73,14 +73,16 @@ protected ModelPlot doParseInstance(XContentParser parser) { public void testEquals_GivenSameObject() { ModelPlot modelPlot = - new ModelPlot(randomAlphaOfLength(15), new Date(randomLong()), randomNonNegativeLong(), randomInt()); + new ModelPlot(randomAlphaOfLength(15), + randomDate(), randomNonNegativeLong(), randomInt()); assertTrue(modelPlot.equals(modelPlot)); } public void testEquals_GivenObjectOfDifferentClass() { ModelPlot modelPlot = - new ModelPlot(randomAlphaOfLength(15), new Date(randomLong()), randomNonNegativeLong(), randomInt()); + new ModelPlot(randomAlphaOfLength(15), + randomDate(), randomNonNegativeLong(), randomInt()); assertFalse(modelPlot.equals("a string")); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/OverallBucketTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/OverallBucketTests.java index 42b29cb5ee224..b6c0a99685d0b 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/OverallBucketTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/results/OverallBucketTests.java @@ -26,7 +26,7 @@ protected OverallBucket createTestInstance() { for (int i = 0; i < jobCount; ++i) { jobs.add(new OverallBucket.JobInfo(JobTests.randomValidJobId(), randomDoubleBetween(0.0, 100.0, true))); } - return new OverallBucket(new Date(randomNonNegativeLong()), + return new OverallBucket(new Date(randomLongBetween(0, 3000000000000L)), randomIntBetween(60, 24 * 3600), randomDoubleBetween(0.0, 100.0, true), jobs, @@ -47,4 +47,4 @@ public void testCompareTo() { assertThat(jobInfo1.compareTo(jobInfo3), lessThan(0)); assertThat(jobInfo2.compareTo(jobInfo3), lessThan(0)); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringTestUtils.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringTestUtils.java index 368758654cb9b..647835bf9311e 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringTestUtils.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringTestUtils.java @@ -87,7 +87,8 @@ public static MonitoringBulkDoc randomMonitoringBulkDoc(final Random random, final MonitoredSystem system, final String type) throws IOException { final String id = random.nextBoolean() ? 
RandomStrings.randomAsciiLettersOfLength(random, 5) : null; - final long timestamp = RandomNumbers.randomLongBetween(random, 0L, Long.MAX_VALUE); + // ending date is the last second of 9999, should be sufficient + final long timestamp = RandomNumbers.randomLongBetween(random, 0L, 253402300799000L); final long interval = RandomNumbers.randomLongBetween(random, 0L, Long.MAX_VALUE); return new MonitoringBulkDoc(system, type, id, timestamp, interval, source, xContentType); } diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java index c23ef3c8ee51c..6caefe148b28a 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/exporter/local/LocalExporterIntegTests.java @@ -61,7 +61,7 @@ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, numDataNodes = 1, numClientNodes = 0, transportClientRatio = 0.0, supportsDedicatedMasters = false) public class LocalExporterIntegTests extends LocalExporterIntegTestCase { - private final String indexTimeFormat = randomFrom("YY", "YYYY", "YYYY.MM", "YYYY-MM", "MM.YYYY", "MM", null); + private final String indexTimeFormat = randomFrom("yy", "yyyy", "yyyy.MM", "yyyy-MM", "MM.yyyy", "MM", null); private void stopMonitoring() { // Now disabling the monitoring service, so that no more collection are started diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtils.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtils.java index 232034177e87b..59141d2a83aeb 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtils.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtils.java @@ -5,7 +5,7 @@ */ package org.elasticsearch.xpack.rollup; -import org.elasticsearch.common.rounding.DateTimeUnit; +import org.elasticsearch.common.Rounding; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; @@ -16,7 +16,6 @@ import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.action.RollupJobCaps; import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig; -import org.joda.time.DateTimeZone; import java.util.ArrayList; import java.util.Comparator; @@ -98,7 +97,7 @@ private static void checkDateHisto(DateHistogramAggregationBuilder source, List< DateHistogramInterval interval = new DateHistogramInterval((String)agg.get(RollupField.INTERVAL)); String thisTimezone = (String)agg.get(DateHistogramGroupConfig.TIME_ZONE); - String sourceTimeZone = source.timeZone() == null ? DateTimeZone.UTC.toString() : source.timeZone().toString(); + String sourceTimeZone = source.timeZone() == null ? "UTC" : source.timeZone().toString(); // Ensure we are working on the same timezone if (thisTimezone.equalsIgnoreCase(sourceTimeZone) == false) { @@ -152,10 +151,10 @@ static boolean validateCalendarInterval(DateHistogramInterval requestInterval, // The request must be gte the config. 
The CALENDAR_ORDERING map values are integers representing // relative orders between the calendar units - DateTimeUnit requestUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(requestInterval.toString()); - long requestOrder = requestUnit.field(DateTimeZone.UTC).getDurationField().getUnitMillis(); - DateTimeUnit configUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(configInterval.toString()); - long configOrder = configUnit.field(DateTimeZone.UTC).getDurationField().getUnitMillis(); + Rounding.DateTimeUnit requestUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(requestInterval.toString()); + long requestOrder = requestUnit.getField().getBaseUnit().getDuration().toMillis(); + Rounding.DateTimeUnit configUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(configInterval.toString()); + long configOrder = configUnit.getField().getBaseUnit().getDuration().toMillis(); // All calendar units are multiples naturally, so we just care about gte return requestOrder >= configOrder; @@ -387,8 +386,8 @@ private static Comparator getComparator() { static long getMillisFixedOrCalendar(String value) { DateHistogramInterval interval = new DateHistogramInterval(value); if (isCalendarInterval(interval)) { - DateTimeUnit intervalUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(interval.toString()); - return intervalUnit.field(DateTimeZone.UTC).getDurationField().getUnitMillis(); + Rounding.DateTimeUnit intervalUnit = DateHistogramAggregationBuilder.DATE_FIELD_UNITS.get(interval.toString()); + return intervalUnit.getField().getBaseUnit().getDuration().toMillis(); } else { return TimeValue.parseTimeValue(value, "date_histo.comparator.interval").getMillis(); } diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java index ee29e56a33169..1d5f9093a29df 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupIndexer.java @@ -28,9 +28,9 @@ import org.elasticsearch.search.aggregations.support.ValueType; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.xpack.core.indexing.AsyncTwoPhaseIndexer; import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.indexing.IterationResult; -import org.elasticsearch.xpack.core.indexing.AsyncTwoPhaseIndexer; import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.job.DateHistogramGroupConfig; import org.elasticsearch.xpack.core.rollup.job.GroupConfig; @@ -42,6 +42,7 @@ import org.elasticsearch.xpack.core.rollup.job.TermsGroupConfig; import org.joda.time.DateTimeZone; +import java.time.ZoneId; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; @@ -214,7 +215,7 @@ public static List> createValueSourceBuilders(fi final DateHistogramValuesSourceBuilder dateHistogramBuilder = new DateHistogramValuesSourceBuilder(dateHistogramName); dateHistogramBuilder.dateHistogramInterval(dateHistogram.getInterval()); dateHistogramBuilder.field(dateHistogramField); - dateHistogramBuilder.timeZone(toDateTimeZone(dateHistogram.getTimeZone())); + dateHistogramBuilder.timeZone(ZoneId.of(dateHistogram.getTimeZone())); return 
Collections.singletonList(dateHistogramBuilder); } diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtilTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtilTests.java index 95161e0d149dc..d05a78e121296 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtilTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/RollupJobIdentifierUtilTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.xpack.core.rollup.job.TermsGroupConfig; import org.joda.time.DateTimeZone; +import java.time.ZoneOffset; import java.util.ArrayList; import java.util.Arrays; import java.util.HashSet; @@ -122,14 +123,14 @@ public void testIncompatibleFixedCalendarInterval() { } public void testBadTimeZone() { - final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"), null, "EST")); + final GroupConfig group = new GroupConfig(new DateHistogramGroupConfig("foo", new DateHistogramInterval("1h"), null, "CET")); final RollupJobConfig job = new RollupJobConfig("foo", "index", "rollup", "*/5 * * * * ?", 10, group, emptyList(), null); RollupJobCaps cap = new RollupJobCaps(job); Set caps = singletonSet(cap); DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("foo").field("foo") .dateHistogramInterval(new DateHistogramInterval("1h")) - .timeZone(DateTimeZone.UTC); + .timeZone(ZoneOffset.UTC); RuntimeException e = expectThrows(RuntimeException.class, () -> RollupJobIdentifierUtils.findBestJobs(builder, caps)); assertThat(e.getMessage(), equalTo("There is not a rollup job that has a [date_histogram] agg on field " + diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java index 3dc91ede1bd2c..0032b5a88a563 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/SearchActionTests.java @@ -147,7 +147,7 @@ public void testRangeWrongTZ() { Set caps = new HashSet<>(); caps.add(cap); Exception e = expectThrows(IllegalArgumentException.class, - () -> TransportRollupSearchAction.rewriteQuery(new RangeQueryBuilder("foo").gt(1).timeZone("EST"), caps)); + () -> TransportRollupSearchAction.rewriteQuery(new RangeQueryBuilder("foo").gt(1).timeZone("CET"), caps)); assertThat(e.getMessage(), equalTo("Field [foo] in [range] query was found in rollup indices, but requested timezone is not " + "compatible. 
Options include: [UTC]")); } diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java index bd8a0b19f8250..9f8796f4c9589 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/config/ConfigTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.xpack.core.rollup.job.TermsGroupConfig; import org.joda.time.DateTimeZone; +import java.time.zone.ZoneRulesException; import java.util.HashMap; import java.util.Map; @@ -84,9 +85,9 @@ public void testDefaultTimeZone() { } public void testUnknownTimeZone() { - Exception e = expectThrows(IllegalArgumentException.class, + Exception e = expectThrows(ZoneRulesException.class, () -> new DateHistogramGroupConfig("foo", DateHistogramInterval.HOUR, null, "FOO")); - assertThat(e.getMessage(), equalTo("The datetime zone id 'FOO' is not recognised")); + assertThat(e.getMessage(), equalTo("Unknown time-zone ID: FOO")); } public void testEmptyHistoField() { diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java index b87b1f3761fdb..cdabb36d42760 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupIndexerIndexingTests.java @@ -29,7 +29,7 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchResponseSections; import org.elasticsearch.action.search.ShardSearchFailure; -import org.elasticsearch.common.rounding.Rounding; +import org.elasticsearch.common.Rounding; import org.elasticsearch.common.time.DateFormatter; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.IndexSettings; @@ -58,12 +58,14 @@ import org.junit.Before; import java.io.IOException; +import java.time.ZoneId; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Executor; @@ -144,22 +146,22 @@ public void testDateHistoAndMetrics() throws Exception { final List> dataset = new ArrayList<>(); dataset.addAll( Arrays.asList( - asMap("the_histo", asLong("2015-03-31T03:00:00"), "counter", 10), - asMap("the_histo", asLong("2015-03-31T03:20:00"), "counter", 20), - asMap("the_histo", asLong("2015-03-31T03:40:00"), "counter", 20), - asMap("the_histo", asLong("2015-03-31T04:00:00"), "counter", 32), - asMap("the_histo", asLong("2015-03-31T04:20:00"), "counter", 54), - asMap("the_histo", asLong("2015-03-31T04:40:00"), "counter", 55), - asMap("the_histo", asLong("2015-03-31T05:00:00"), "counter", 55), - asMap("the_histo", asLong("2015-03-31T05:00:00"), "counter", 70), - asMap("the_histo", asLong("2015-03-31T05:20:00"), "counter", 70), - asMap("the_histo", asLong("2015-03-31T05:40:00"), "counter", 80), - asMap("the_histo", asLong("2015-03-31T06:00:00"), "counter", 80), - asMap("the_histo", asLong("2015-03-31T06:20:00"), "counter", 90), - asMap("the_histo", asLong("2015-03-31T06:40:00"), "counter", 100), - asMap("the_histo", asLong("2015-03-31T07:00:00"), "counter", 
120), - asMap("the_histo", asLong("2015-03-31T07:20:00"), "counter", 120), - asMap("the_histo", asLong("2015-03-31T07:40:00"), "counter", 200) + asMap("the_histo", asLong("2015-03-31T03:00:00.000Z"), "counter", 10), + asMap("the_histo", asLong("2015-03-31T03:20:00.000Z"), "counter", 20), + asMap("the_histo", asLong("2015-03-31T03:40:00.000Z"), "counter", 20), + asMap("the_histo", asLong("2015-03-31T04:00:00.000Z"), "counter", 32), + asMap("the_histo", asLong("2015-03-31T04:20:00.000Z"), "counter", 54), + asMap("the_histo", asLong("2015-03-31T04:40:00.000Z"), "counter", 55), + asMap("the_histo", asLong("2015-03-31T05:00:00.000Z"), "counter", 55), + asMap("the_histo", asLong("2015-03-31T05:00:00.000Z"), "counter", 70), + asMap("the_histo", asLong("2015-03-31T05:20:00.000Z"), "counter", 70), + asMap("the_histo", asLong("2015-03-31T05:40:00.000Z"), "counter", 80), + asMap("the_histo", asLong("2015-03-31T06:00:00.000Z"), "counter", 80), + asMap("the_histo", asLong("2015-03-31T06:20:00.000Z"), "counter", 90), + asMap("the_histo", asLong("2015-03-31T06:40:00.000Z"), "counter", 100), + asMap("the_histo", asLong("2015-03-31T07:00:00.000Z"), "counter", 120), + asMap("the_histo", asLong("2015-03-31T07:20:00.000Z"), "counter", 120), + asMap("the_histo", asLong("2015-03-31T07:40:00.000Z"), "counter", 200) ) ); executeTestCase(dataset, job, System.currentTimeMillis(), (resp) -> { @@ -170,7 +172,7 @@ public void testDateHistoAndMetrics() throws Exception { assertThat(request.sourceAsMap(), equalTo( asMap( "_rollup.version", newIDScheme ? 2 : 1, - "the_histo.date_histogram.timestamp", asLong("2015-03-31T03:00:00"), + "the_histo.date_histogram.timestamp", asLong("2015-03-31T03:00:00.000Z"), "the_histo.date_histogram.interval", "1h", "the_histo.date_histogram._count", 3, "counter.avg._count", 3.0, @@ -188,7 +190,7 @@ public void testDateHistoAndMetrics() throws Exception { assertThat(request.sourceAsMap(), equalTo( asMap( "_rollup.version", newIDScheme ? 2 : 1, - "the_histo.date_histogram.timestamp", asLong("2015-03-31T04:00:00"), + "the_histo.date_histogram.timestamp", asLong("2015-03-31T04:00:00.000Z"), "the_histo.date_histogram.interval", "1h", "the_histo.date_histogram._count", 3, "counter.avg._count", 3.0, @@ -206,7 +208,7 @@ public void testDateHistoAndMetrics() throws Exception { assertThat(request.sourceAsMap(), equalTo( asMap( "_rollup.version", newIDScheme ? 2 : 1, - "the_histo.date_histogram.timestamp", asLong("2015-03-31T05:00:00"), + "the_histo.date_histogram.timestamp", asLong("2015-03-31T05:00:00.000Z"), "the_histo.date_histogram.interval", "1h", "the_histo.date_histogram._count", 4, "counter.avg._count", 4.0, @@ -224,7 +226,7 @@ public void testDateHistoAndMetrics() throws Exception { assertThat(request.sourceAsMap(), equalTo( asMap( "_rollup.version", newIDScheme ? 2 : 1, - "the_histo.date_histogram.timestamp", asLong("2015-03-31T06:00:00"), + "the_histo.date_histogram.timestamp", asLong("2015-03-31T06:00:00.000Z"), "the_histo.date_histogram.interval", "1h", "the_histo.date_histogram._count", 3, "counter.avg._count", 3.0, @@ -242,7 +244,7 @@ public void testDateHistoAndMetrics() throws Exception { assertThat(request.sourceAsMap(), equalTo( asMap( "_rollup.version", newIDScheme ? 
2 : 1, - "the_histo.date_histogram.timestamp", asLong("2015-03-31T07:00:00"), + "the_histo.date_histogram.timestamp", asLong("2015-03-31T07:00:00.000Z"), "the_histo.date_histogram.interval", "1h", "the_histo.date_histogram._count", 3, "counter.avg._count", 3.0, @@ -326,7 +328,7 @@ public void testSimpleDateHistoWithDelay() throws Exception { public void testSimpleDateHistoWithTimeZone() throws Exception { final List> dataset = new ArrayList<>(); - long now = asLong("2015-04-01T10:00:00"); + long now = asLong("2015-04-01T10:00:00.000Z"); dataset.addAll( Arrays.asList( asMap("the_histo", now - TimeValue.timeValueHours(10).getMillis()), @@ -353,7 +355,7 @@ public void testSimpleDateHistoWithTimeZone() throws Exception { assertThat(request.sourceAsMap(), equalTo( asMap( "_rollup.version", newIDScheme ? 2 : 1, - "the_histo.date_histogram.timestamp", asLong("2015-03-31T03:00:00"), + "the_histo.date_histogram.timestamp", asLong("2015-03-31T03:00:00.000Z"), "the_histo.date_histogram.interval", "1d", "the_histo.date_histogram._count", 2, "the_histo.date_histogram.time_zone", timeZone.toString(), @@ -372,7 +374,7 @@ public void testSimpleDateHistoWithTimeZone() throws Exception { assertThat(request.sourceAsMap(), equalTo( asMap( "_rollup.version", newIDScheme ? 2 : 1, - "the_histo.date_histogram.timestamp", asLong("2015-03-31T03:00:00"), + "the_histo.date_histogram.timestamp", asLong("2015-03-31T03:00:00.000Z"), "the_histo.date_histogram.interval", "1d", "the_histo.date_histogram._count", 2, "the_histo.date_histogram.time_zone", timeZone.toString(), @@ -385,7 +387,7 @@ public void testSimpleDateHistoWithTimeZone() throws Exception { assertThat(request.sourceAsMap(), equalTo( asMap( "_rollup.version", newIDScheme ? 2 : 1, - "the_histo.date_histogram.timestamp", asLong("2015-04-01T03:00:00"), + "the_histo.date_histogram.timestamp", asLong("2015-04-01T03:00:00.000Z"), "the_histo.date_histogram.interval", "1d", "the_histo.date_histogram._count", 5, "the_histo.date_histogram.time_zone", timeZone.toString(), @@ -449,7 +451,7 @@ static Map asMap(Object... 
fields) { } private static long asLong(String dateTime) { - return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseJoda(dateTime).getMillis(); + return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis(dateTime); } /** @@ -488,7 +490,8 @@ private void executeTestCase(List> docs, RollupJobConfig con private Map createFieldTypes(RollupJobConfig job) { Map fieldTypes = new HashMap<>(); MappedFieldType fieldType = new DateFieldMapper.Builder(job.getGroupConfig().getDateHistogram().getField()) - .dateTimeFormatter(DateFormatter.forPattern(randomFrom("basic_date", "date_optional_time", "epoch_second"))) + .format(randomFrom("basic_date", "date_optional_time", "epoch_second")) + .locale(Locale.ROOT) .build(new Mapper.BuilderContext(settings.getSettings(), new ContentPath(0))) .fieldType(); fieldTypes.put(fieldType.name(), fieldType); @@ -599,7 +602,7 @@ protected void doNextSearch(SearchRequest request, ActionListener createSourceBuilder() { return new DateHistogramValuesSourceBuilder(id()) .interval(interval) - .timeZone(DateUtils.zoneIdToDateTimeZone(zoneId)); + .timeZone(zoneId); } @Override From 701d89caa2e0e0d154b42a6afd50777a5213c3d3 Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Wed, 23 Jan 2019 11:00:37 +0100 Subject: [PATCH 31/39] Mute FilterAggregatorTests#testRandom Relates #37743 --- .../search/aggregations/bucket/filter/FilterAggregatorTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorTests.java index f5b5d187e4187..af5f65b9698e4 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregatorTests.java @@ -63,6 +63,7 @@ public void testEmpty() throws Exception { directory.close(); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37743") public void testRandom() throws Exception { Directory directory = newDirectory(); RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory); From 6130d151725c202fd11dfdbd267478edefc83f56 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Wed, 23 Jan 2019 11:08:54 +0100 Subject: [PATCH 32/39] Adapt SyncedFlushService (#37691) --- .../indices/flush/SyncedFlushService.java | 15 +++++-------- .../flush/SyncedFlushSingleNodeTests.java | 22 +++++++++++++------ 2 files changed, 21 insertions(+), 16 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index 9bc4e4ead1269..0423559aaf5a5 100644 --- a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -32,7 +32,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; @@ -290,16 +289,14 @@ private void reportSuccessWithExistingSyncId(ShardId shardId, listener.onResponse(new ShardsSyncedFlushResult(shardId, existingSyncId, 
totalShards, results)); } - final IndexShardRoutingTable getShardRoutingTable(ShardId shardId, ClusterState state) { - final IndexRoutingTable indexRoutingTable = state.routingTable().index(shardId.getIndexName()); - if (indexRoutingTable == null) { - IndexMetaData index = state.getMetaData().index(shardId.getIndex()); - if (index != null && index.getState() == IndexMetaData.State.CLOSE) { - throw new IndexClosedException(shardId.getIndex()); - } + final IndexShardRoutingTable getShardRoutingTable(final ShardId shardId, final ClusterState state) { + final IndexMetaData indexMetaData = state.getMetaData().index(shardId.getIndex()); + if (indexMetaData == null) { throw new IndexNotFoundException(shardId.getIndexName()); + } else if (indexMetaData.getState() == IndexMetaData.State.CLOSE) { + throw new IndexClosedException(shardId.getIndex()); } - final IndexShardRoutingTable shardRoutingTable = indexRoutingTable.shard(shardId.id()); + final IndexShardRoutingTable shardRoutingTable = state.routingTable().index(indexMetaData.getIndex()).shard(shardId.id()); if (shardRoutingTable == null) { throw new ShardNotFoundException(shardId); } diff --git a/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java b/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java index b9e0bd13f3568..9d7f3d5e253cf 100644 --- a/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java +++ b/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushSingleNodeTests.java @@ -20,11 +20,13 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.shard.IndexShard; @@ -38,6 +40,8 @@ import java.util.Map; import java.util.concurrent.ExecutionException; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; + public class SyncedFlushSingleNodeTests extends ESSingleNodeTestCase { public void testModificationPreventsFlushing() throws InterruptedException { @@ -130,22 +134,26 @@ public void testSyncFailsIfOperationIsInFlight() throws InterruptedException, Ex } public void testSyncFailsOnIndexClosedOrMissing() throws InterruptedException { - createIndex("test"); + createIndex("test", Settings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .build()); IndexService test = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test")); - IndexShard shard = test.getShardOrNull(0); + final IndexShard shard = test.getShardOrNull(0); + assertNotNull(shard); + final ShardId shardId = shard.shardId(); + + final SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); - SyncedFlushService flushService = getInstanceFromNode(SyncedFlushService.class); SyncedFlushUtil.LatchedListener listener = new SyncedFlushUtil.LatchedListener(); - flushService.attemptSyncedFlush(new ShardId("test", "_na_", 1), listener); + flushService.attemptSyncedFlush(new 
ShardId(shard.shardId().getIndex(), 1), listener); listener.latch.await(); assertNotNull(listener.error); assertNull(listener.result); assertEquals(ShardNotFoundException.class, listener.error.getClass()); assertEquals("no such shard", listener.error.getMessage()); - final ShardId shardId = shard.shardId(); - - client().admin().indices().prepareClose("test").get(); + assertAcked(client().admin().indices().prepareClose("test")); listener = new SyncedFlushUtil.LatchedListener(); flushService.attemptSyncedFlush(shardId, listener); listener.latch.await(); From f54a3b5f672c5f5728a43f0976ded7f6b7733fda Mon Sep 17 00:00:00 2001 From: Alpar Torok Date: Wed, 23 Jan 2019 12:09:40 +0200 Subject: [PATCH 33/39] Don't use Groovy's `withDefault` (#37726) Closes #37061 --- .../main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy index af207bcae7ca8..54a14138505cd 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/BuildPlugin.groovy @@ -392,7 +392,7 @@ class BuildPlugin implements Plugin { static void requireJavaHome(Task task, int version) { Project rootProject = task.project.rootProject // use root project for global accounting if (rootProject.hasProperty('requiredJavaVersions') == false) { - rootProject.rootProject.ext.requiredJavaVersions = [:].withDefault{key -> return []} + rootProject.rootProject.ext.requiredJavaVersions = [:] rootProject.gradle.taskGraph.whenReady { TaskExecutionGraph taskGraph -> List messages = [] for (entry in rootProject.requiredJavaVersions) { @@ -415,7 +415,7 @@ class BuildPlugin implements Plugin { throw new GradleException("JAVA${version}_HOME required to run task:\n${task}") } } else { - rootProject.requiredJavaVersions.get(version).add(task) + rootProject.requiredJavaVersions.getOrDefault(version, []).add(task) } } From 1de286bfb3fc8190ef30ad4195f82e124b7417ff Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Wed, 23 Jan 2019 12:15:00 +0100 Subject: [PATCH 34/39] Add a note how to benchmark Elasticsearch With this commit we add a note that explains when to use benchmarks and point the reader to the macrobenchmarking tool Rally for further information. Relates #37694 --- TESTING.asciidoc | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/TESTING.asciidoc b/TESTING.asciidoc index 6389f8cb3038d..cf4a40713114c 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -631,3 +631,13 @@ inside `/etc/hosts`, e.g.: 255.255.255.255 broadcasthost ::1 localhost ElasticMBP.local` .... + +== Benchmarking + +For changes that might affect the performance characteristics of Elasticsearch +you should also run macrobenchmarks. We maintain a macrobenchmarking tool +called https://github.com/elastic/rally[Rally] +which you can use to measure the performance impact. It comes with a set of +default benchmarks that we also +https://elasticsearch-benchmarks.elastic.co/[run every night]. To get started, +please see https://esrally.readthedocs.io/en/stable/[Rally's documentation]. 
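For a concrete sense of what such a macrobenchmark run looks like, Rally's quickstart-style invocation is along the lines of `esrally --distribution-version=6.5.3 --track=geonames`, which downloads the given Elasticsearch version and runs the geonames default track against it; the version and track shown here are illustrative assumptions rather than part of this patch, so consult the Rally documentation linked above for the authoritative options.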
From 100537fbc320d488c5b217999f01fdb63ce1e73e Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Wed, 23 Jan 2019 12:19:54 +0100 Subject: [PATCH 35/39] Target only specific index in update settings test With this commit we limit the update settings request to the index that is used in `IndicesClientIT#testIndexPutSettings()`. This avoids spurious exceptions involving other indices that might also be present in the test cluster. Closes #36931 Relates #37338 --- .../test/java/org/elasticsearch/client/IndicesClientIT.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java index af8a51b4900d8..55c20ed6d7792 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/IndicesClientIT.java @@ -1177,7 +1177,7 @@ public void testIndexPutSettings() throws IOException { createIndex(index, Settings.EMPTY); assertThat(dynamicSetting.getDefault(Settings.EMPTY), not(dynamicSettingValue)); - UpdateSettingsRequest dynamicSettingRequest = new UpdateSettingsRequest(); + UpdateSettingsRequest dynamicSettingRequest = new UpdateSettingsRequest(index); dynamicSettingRequest.settings(Settings.builder().put(dynamicSettingKey, dynamicSettingValue).build()); AcknowledgedResponse response = execute(dynamicSettingRequest, highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync); @@ -1187,7 +1187,7 @@ public void testIndexPutSettings() throws IOException { assertThat(indexSettingsAsMap.get(dynamicSettingKey), equalTo(String.valueOf(dynamicSettingValue))); assertThat(staticSetting.getDefault(Settings.EMPTY), not(staticSettingValue)); - UpdateSettingsRequest staticSettingRequest = new UpdateSettingsRequest(); + UpdateSettingsRequest staticSettingRequest = new UpdateSettingsRequest(index); staticSettingRequest.settings(Settings.builder().put(staticSettingKey, staticSettingValue).build()); ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(staticSettingRequest, highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync)); @@ -1207,7 +1207,7 @@ public void testIndexPutSettings() throws IOException { assertThat(indexSettingsAsMap.get(staticSettingKey), equalTo(staticSettingValue)); assertThat(unmodifiableSetting.getDefault(Settings.EMPTY), not(unmodifiableSettingValue)); - UpdateSettingsRequest unmodifiableSettingRequest = new UpdateSettingsRequest(); + UpdateSettingsRequest unmodifiableSettingRequest = new UpdateSettingsRequest(index); unmodifiableSettingRequest.settings(Settings.builder().put(unmodifiableSettingKey, unmodifiableSettingValue).build()); exception = expectThrows(ElasticsearchException.class, () -> execute(unmodifiableSettingRequest, highLevelClient().indices()::putSettings, highLevelClient().indices()::putSettingsAsync)); From 4ec3a6d922f302ed65b322506de8cdac3b8e1405 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Wed, 23 Jan 2019 12:38:44 +0100 Subject: [PATCH 36/39] Ensure either success or failure path for SearchOperationListener is called (#37467) Today we have several implementations of executing SearchOperationListener in SearchService. 
While all of them seem to be safe, at least one, the one that executes scroll searches, can cause illegal executions of SearchOperationListener callbacks that can in turn trigger assertions in ShardSearchStats. This change adds a SearchOperationListenerExecutor that uses try-with-resources blocks to ensure listeners are called in a safe way. Relates to #37185 --- .../elasticsearch/search/SearchService.java | 171 ++++++++++-------- 1 file changed, 95 insertions(+), 76 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index ddec3637ed491..5e2758eb5b83c 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -24,7 +24,6 @@ import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.TopDocs; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.search.SearchTask; @@ -329,7 +328,7 @@ private DfsSearchResult executeDfsPhase(ShardSearchRequest request, SearchTask t } catch (Exception e) { logger.trace("Dfs phase failed", e); processFailure(context, e); - throw ExceptionsHelper.convertToRuntime(e); + throw e; } finally { cleanContext(context); } @@ -380,29 +379,24 @@ protected void doRun() { }); } - private SearchPhaseResult executeQueryPhase(ShardSearchRequest request, SearchTask task) throws IOException { + private SearchPhaseResult executeQueryPhase(ShardSearchRequest request, SearchTask task) throws Exception { final SearchContext context = createAndPutContext(request); - final SearchOperationListener operationListener = context.indexShard().getSearchOperationListener(); context.incRef(); - boolean queryPhaseSuccess = false; try { context.setTask(task); - operationListener.onPreQueryPhase(context); - long time = System.nanoTime(); - contextProcessing(context); - - loadOrExecuteQueryPhase(request, context); - - if (context.queryResult().hasSearchContext() == false && context.scrollContext() == null) { - freeContext(context.id()); - } else { - contextProcessedSuccessfully(context); + final long afterQueryTime; + try (SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(context)) { + contextProcessing(context); + loadOrExecuteQueryPhase(request, context); + if (context.queryResult().hasSearchContext() == false && context.scrollContext() == null) { + freeContext(context.id()); + } else { + contextProcessedSuccessfully(context); + } + afterQueryTime = executor.success(); } - final long afterQueryTime = System.nanoTime(); - queryPhaseSuccess = true; - operationListener.onQueryPhase(context, afterQueryTime - time); if (request.numberOfShards() == 1) { - return executeFetchPhase(context, operationListener, afterQueryTime); + return executeFetchPhase(context, afterQueryTime); } return context.queryResult(); } catch (Exception e) { @@ -411,21 +405,16 @@ private SearchPhaseResult executeQueryPhase(ShardSearchRequest request, SearchTa e = (e.getCause() == null || e.getCause() instanceof Exception) ?
(Exception) e.getCause() : new ElasticsearchException(e.getCause()); } - if (!queryPhaseSuccess) { - operationListener.onFailedQueryPhase(context); - } logger.trace("Query phase failed", e); processFailure(context, e); - throw ExceptionsHelper.convertToRuntime(e); + throw e; } finally { cleanContext(context); } } - private QueryFetchSearchResult executeFetchPhase(SearchContext context, SearchOperationListener operationListener, - long afterQueryTime) { - operationListener.onPreFetchPhase(context); - try { + private QueryFetchSearchResult executeFetchPhase(SearchContext context, long afterQueryTime) { + try (SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(context, true, afterQueryTime)){ shortcutDocIdsToLoad(context); fetchPhase.execute(context); if (fetchPhaseShouldFreeContext(context)) { @@ -433,34 +422,27 @@ private QueryFetchSearchResult executeFetchPhase(SearchContext context, SearchOp } else { contextProcessedSuccessfully(context); } - } catch (Exception e) { - operationListener.onFailedFetchPhase(context); - throw ExceptionsHelper.convertToRuntime(e); + executor.success(); } - operationListener.onFetchPhase(context, System.nanoTime() - afterQueryTime); return new QueryFetchSearchResult(context.queryResult(), context.fetchResult()); } public void executeQueryPhase(InternalScrollSearchRequest request, SearchTask task, ActionListener listener) { runAsync(request.id(), () -> { final SearchContext context = findContext(request.id(), request); - SearchOperationListener operationListener = context.indexShard().getSearchOperationListener(); context.incRef(); - try { + try (SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(context)) { context.setTask(task); - operationListener.onPreQueryPhase(context); - long time = System.nanoTime(); contextProcessing(context); processScroll(request, context); queryPhase.execute(context); contextProcessedSuccessfully(context); - operationListener.onQueryPhase(context, System.nanoTime() - time); + executor.success(); return new ScrollQuerySearchResult(context.queryResult(), context.shardTarget()); } catch (Exception e) { - operationListener.onFailedQueryPhase(context); logger.trace("Query phase failed", e); processFailure(context, e); - throw ExceptionsHelper.convertToRuntime(e); + throw e; } finally { cleanContext(context); } @@ -471,15 +453,10 @@ public void executeQueryPhase(QuerySearchRequest request, SearchTask task, Actio runAsync(request.id(), () -> { final SearchContext context = findContext(request.id(), request); context.setTask(task); - IndexShard indexShard = context.indexShard(); - SearchOperationListener operationListener = indexShard.getSearchOperationListener(); context.incRef(); - try { + try (SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(context)) { contextProcessing(context); context.searcher().setAggregatedDfs(request.dfs()); - - operationListener.onPreQueryPhase(context); - long time = System.nanoTime(); queryPhase.execute(context); if (context.queryResult().hasSearchContext() == false && context.scrollContext() == null) { // no hits, we can release the context since there will be no fetch phase @@ -487,13 +464,12 @@ public void executeQueryPhase(QuerySearchRequest request, SearchTask task, Actio } else { contextProcessedSuccessfully(context); } - operationListener.onQueryPhase(context, System.nanoTime() - time); + executor.success(); return context.queryResult(); } catch (Exception e) { - operationListener.onFailedQueryPhase(context); 
logger.trace("Query phase failed", e); processFailure(context, e); - throw ExceptionsHelper.convertToRuntime(e); + throw e; } finally { cleanContext(context); } @@ -527,28 +503,19 @@ public void executeFetchPhase(InternalScrollSearchRequest request, SearchTask ta ActionListener listener) { runAsync(request.id(), () -> { final SearchContext context = findContext(request.id(), request); + context.setTask(task); context.incRef(); - try { - context.setTask(task); + try (SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(context)){ contextProcessing(context); - SearchOperationListener operationListener = context.indexShard().getSearchOperationListener(); processScroll(request, context); - operationListener.onPreQueryPhase(context); - final long time = System.nanoTime(); - try { - queryPhase.execute(context); - } catch (Exception e) { - operationListener.onFailedQueryPhase(context); - throw ExceptionsHelper.convertToRuntime(e); - } - long afterQueryTime = System.nanoTime(); - operationListener.onQueryPhase(context, afterQueryTime - time); - QueryFetchSearchResult fetchSearchResult = executeFetchPhase(context, operationListener, afterQueryTime); + queryPhase.execute(context); + final long afterQueryTime = executor.success(); + QueryFetchSearchResult fetchSearchResult = executeFetchPhase(context, afterQueryTime); return new ScrollQueryFetchSearchResult(fetchSearchResult, context.shardTarget()); } catch (Exception e) { logger.trace("Fetch phase failed", e); processFailure(context, e); - throw ExceptionsHelper.convertToRuntime(e); + throw e; } finally { cleanContext(context); } @@ -558,7 +525,6 @@ public void executeFetchPhase(InternalScrollSearchRequest request, SearchTask ta public void executeFetchPhase(ShardFetchRequest request, SearchTask task, ActionListener listener) { runAsync(request.id(), () -> { final SearchContext context = findContext(request.id(), request); - final SearchOperationListener operationListener = context.indexShard().getSearchOperationListener(); context.incRef(); try { context.setTask(task); @@ -567,21 +533,20 @@ public void executeFetchPhase(ShardFetchRequest request, SearchTask task, Action context.scrollContext().lastEmittedDoc = request.lastEmittedDoc(); } context.docIdsToLoad(request.docIds(), 0, request.docIdsSize()); - operationListener.onPreFetchPhase(context); - long time = System.nanoTime(); - fetchPhase.execute(context); - if (fetchPhaseShouldFreeContext(context)) { - freeContext(request.id()); - } else { - contextProcessedSuccessfully(context); + try (SearchOperationListenerExecutor executor = new SearchOperationListenerExecutor(context, true, System.nanoTime())) { + fetchPhase.execute(context); + if (fetchPhaseShouldFreeContext(context)) { + freeContext(request.id()); + } else { + contextProcessedSuccessfully(context); + } + executor.success(); } - operationListener.onFetchPhase(context, System.nanoTime() - time); return context.fetchResult(); } catch (Exception e) { - operationListener.onFailedFetchPhase(context); logger.trace("Fetch phase failed", e); processFailure(context, e); - throw ExceptionsHelper.convertToRuntime(e); + throw e; } finally { cleanContext(context); } @@ -661,7 +626,7 @@ final SearchContext createContext(ShardSearchRequest request) throws IOException context.lowLevelCancellation(lowLevelCancellation); } catch (Exception e) { context.close(); - throw ExceptionsHelper.convertToRuntime(e); + throw e; } return context; @@ -733,7 +698,7 @@ public void freeAllScrollContexts() { } } - private void 
contextScrollKeepAlive(SearchContext context, long keepAlive) throws IOException { + private void contextScrollKeepAlive(SearchContext context, long keepAlive) { if (keepAlive > maxKeepAlive) { throw new IllegalArgumentException( "Keep alive for scroll (" + TimeValue.timeValueMillis(keepAlive) + ") is too large. " + @@ -991,7 +956,7 @@ private void shortcutDocIdsToLoad(SearchContext context) { context.docIdsToLoad(docIdsToLoad, 0, docIdsToLoad.length); } - private void processScroll(InternalScrollSearchRequest request, SearchContext context) throws IOException { + private void processScroll(InternalScrollSearchRequest request, SearchContext context) { // process scroll context.from(context.from() + context.size()); context.scrollContext().scroll = request.scroll(); @@ -1147,4 +1112,58 @@ public boolean canMatch() { return canMatch; } } + + /** + * This helper class ensures we only execute either the success or the failure path for {@link SearchOperationListener}. + * This is crucial for some implementations like {@link org.elasticsearch.index.search.stats.ShardSearchStats}. + */ + private static final class SearchOperationListenerExecutor implements AutoCloseable { + private final SearchOperationListener listener; + private final SearchContext context; + private final long time; + private final boolean fetch; + private long afterQueryTime = -1; + private boolean closed = false; + + SearchOperationListenerExecutor(SearchContext context) { + this(context, false, System.nanoTime()); + } + + SearchOperationListenerExecutor(SearchContext context, boolean fetch, long startTime) { + this.listener = context.indexShard().getSearchOperationListener(); + this.context = context; + time = startTime; + this.fetch = fetch; + if (fetch) { + listener.onPreFetchPhase(context); + } else { + listener.onPreQueryPhase(context); + } + } + + long success() { + return afterQueryTime = System.nanoTime(); + } + + @Override + public void close() { + assert closed == false : "already closed - while technically ok double closing is a likely a bug in this case"; + if (closed == false) { + closed = true; + if (afterQueryTime != -1) { + if (fetch) { + listener.onFetchPhase(context, afterQueryTime - time); + } else { + listener.onQueryPhase(context, afterQueryTime - time); + } + } else { + if (fetch) { + listener.onFailedFetchPhase(context); + } else { + listener.onFailedQueryPhase(context); + } + } + } + } + } } From d5139e0590ed044c0950cfede5bac537a3fba2b3 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 23 Jan 2019 13:10:11 +0100 Subject: [PATCH 37/39] Only bootstrap and elect node in current voting configuration (#37712) Adapts bootstrapping and leader election to only trigger on nodes that are actually part of the voting configuration. 
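To make the election guard concrete before the diff below, here is a minimal standalone sketch of the check this commit adds; it is an illustration only, replacing Elasticsearch's ClusterState and VotingConfiguration types with plain string sets, while the real electionQuorumContainsLocalNode in the diff reads both configurations from the cluster state's CoordinationMetaData:

import java.util.Set;

// Simplified model of the new guard: a master-eligible node may only
// pre-vote, start an election, or bootstrap a cluster if its own id is
// named in the last committed or last accepted voting configuration.
final class ElectionQuorumCheck {

    static boolean electionQuorumContainsLocalNode(String localNodeId,
                                                   Set<String> lastCommittedConfiguration,
                                                   Set<String> lastAcceptedConfiguration) {
        return lastCommittedConfiguration.contains(localNodeId)
                || lastAcceptedConfiguration.contains(localNodeId);
    }

    public static void main(String[] args) {
        Set<String> committed = Set.of("node-1", "node-2", "node-3");
        Set<String> accepted = Set.of("node-1", "node-2", "node-3");
        // node-4 is outside both configurations, so it skips elections entirely.
        System.out.println(electionQuorumContainsLocalNode("node-4", committed, accepted)); // false
        System.out.println(electionQuorumContainsLocalNode("node-1", committed, accepted)); // true
    }
}

The same idea motivates the new setInitialConfiguration checks in the diff, which reject bootstrapping from a node that is not itself part of the proposed initial configuration.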
--- .../coordination/ClusterBootstrapService.java | 9 +++- .../cluster/coordination/Coordinator.java | 39 +++++++++++++++++- .../ClusterBootstrapServiceTests.java | 12 ++++++ .../coordination/CoordinatorTests.java | 41 +++++++++++++------ 4 files changed, 87 insertions(+), 14 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java index d21c54c03e4e5..cdbf6b6691077 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java @@ -97,7 +97,7 @@ public static boolean discoveryIsConfigured(Settings settings) { void onFoundPeersUpdated() { final Set nodes = getDiscoveredNodes(); - if (transportService.getLocalNode().isMasterNode() && bootstrapRequirements.isEmpty() == false + if (bootstrappingPermitted.get() && transportService.getLocalNode().isMasterNode() && bootstrapRequirements.isEmpty() == false && isBootstrappedSupplier.getAsBoolean() == false && nodes.stream().noneMatch(Coordinator::isZen1Node)) { final Tuple,List> requirementMatchingResult; @@ -114,6 +114,13 @@ void onFoundPeersUpdated() { logger.trace("nodesMatchingRequirements={}, unsatisfiedRequirements={}, bootstrapRequirements={}", nodesMatchingRequirements, unsatisfiedRequirements, bootstrapRequirements); + if (nodesMatchingRequirements.contains(transportService.getLocalNode()) == false) { + logger.info("skipping cluster bootstrapping as local node does not match bootstrap requirements: {}", + bootstrapRequirements); + bootstrappingPermitted.set(false); + return; + } + if (nodesMatchingRequirements.size() * 2 > bootstrapRequirements.size()) { startBootstrap(nodesMatchingRequirements, unsatisfiedRequirements); } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 084d5cf38f2db..4a018c1f78f91 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -348,6 +348,12 @@ private void startElection() { // The preVoteCollector is only active while we are candidate, but it does not call this method with synchronisation, so we have // to check our mode again here. 
if (mode == Mode.CANDIDATE) { + if (electionQuorumContainsLocalNode(getLastAcceptedState()) == false) { + logger.trace("skip election as local node is not part of election quorum: {}", + getLastAcceptedState().coordinationMetaData()); + return; + } + final StartJoinRequest startJoinRequest = new StartJoinRequest(getLocalNode(), Math.max(getCurrentTerm(), maxTermSeen) + 1); logger.debug("starting election with {}", startJoinRequest); @@ -360,6 +366,13 @@ private void startElection() { } } + private static boolean electionQuorumContainsLocalNode(ClusterState lastAcceptedState) { + final String localNodeId = lastAcceptedState.nodes().getLocalNodeId(); + assert localNodeId != null; + return lastAcceptedState.getLastCommittedConfiguration().getNodeIds().contains(localNodeId) + || lastAcceptedState.getLastAcceptedConfiguration().getNodeIds().contains(localNodeId); + } + private Optional ensureTermAtLeast(DiscoveryNode sourceNode, long targetTerm) { assert Thread.holdsLock(mutex) : "Coordinator mutex not held"; if (getCurrentTerm() < targetTerm) { @@ -709,10 +722,24 @@ public boolean setInitialConfiguration(final VotingConfiguration votingConfigura return false; } + if (getLocalNode().isMasterNode() == false) { + logger.debug("skip setting initial configuration as local node is not a master-eligible node"); + throw new CoordinationStateRejectedException( + "this node is not master-eligible, but cluster bootstrapping can only happen on a master-eligible node"); + } + + if (votingConfiguration.getNodeIds().contains(getLocalNode().getId()) == false) { + logger.debug("skip setting initial configuration as local node is not part of initial configuration"); + throw new CoordinationStateRejectedException("local node is not part of initial configuration"); + } + final List knownNodes = new ArrayList<>(); knownNodes.add(getLocalNode()); peerFinder.getFoundPeers().forEach(knownNodes::add); + if (votingConfiguration.hasQuorum(knownNodes.stream().map(DiscoveryNode::getId).collect(Collectors.toList())) == false) { + logger.debug("skip setting initial configuration as not enough nodes discovered to form a quorum in the " + + "initial configuration [knownNodes={}, {}]", knownNodes, votingConfiguration); throw new CoordinationStateRejectedException("not enough nodes discovered to form a quorum in the initial configuration " + "[knownNodes=" + knownNodes + ", " + votingConfiguration + "]"); } @@ -729,6 +756,8 @@ public boolean setInitialConfiguration(final VotingConfiguration votingConfigura metaDataBuilder.coordinationMetaData(coordinationMetaData); coordinationState.get().setInitialState(ClusterState.builder(currentState).metaData(metaDataBuilder).build()); + assert electionQuorumContainsLocalNode(getLastAcceptedState()) : + "initial state does not have local node in its election quorum: " + getLastAcceptedState().coordinationMetaData(); preVoteCollector.update(getPreVoteResponse(), null); // pick up the change to last-accepted version startElectionScheduler(); return true; @@ -1022,12 +1051,20 @@ private void startElectionScheduler() { public void run() { synchronized (mutex) { if (mode == Mode.CANDIDATE) { + final ClusterState lastAcceptedState = coordinationState.get().getLastAcceptedState(); + + if (electionQuorumContainsLocalNode(lastAcceptedState) == false) { + logger.trace("skip prevoting as local node is not part of election quorum: {}", + lastAcceptedState.coordinationMetaData()); + return; + } + if (prevotingRound != null) { prevotingRound.close(); } - final ClusterState lastAcceptedState = 
coordinationState.get().getLastAcceptedState(); final List discoveredNodes = getDiscoveredNodes().stream().filter(n -> isZen1Node(n) == false).collect(Collectors.toList()); + prevotingRound = preVoteCollector.start(lastAcceptedState, discoveredNodes); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterBootstrapServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterBootstrapServiceTests.java index 46a43afa53897..c9ebdf278c71d 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterBootstrapServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterBootstrapServiceTests.java @@ -328,6 +328,18 @@ public void testDoesNotBootstrapsOnNonMasterNode() { deterministicTaskQueue.runAllTasks(); } + public void testDoesNotBootstrapsIfLocalNodeNotInInitialMasterNodes() { + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(Settings.builder().putList( + INITIAL_MASTER_NODES_SETTING.getKey(), otherNode1.getName(), otherNode2.getName()).build(), + transportService, () -> + Stream.of(localNode, otherNode1, otherNode2).collect(Collectors.toList()), () -> false, vc -> { + throw new AssertionError("should not be called"); + }); + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + } + public void testDoesNotBootstrapsIfNotConfigured() { ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService( Settings.builder().putList(INITIAL_MASTER_NODES_SETTING.getKey()).build(), transportService, diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index a9ca7d917b9d8..7db63ab120e91 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.coordination; import com.carrotsearch.randomizedtesting.RandomizedContext; - import org.apache.logging.log4j.CloseableThreadContext; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -53,6 +52,7 @@ import org.elasticsearch.common.settings.Settings.Builder; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.discovery.zen.PublishClusterStateStats; import org.elasticsearch.discovery.zen.UnicastHostsProvider.HostsResolver; import org.elasticsearch.env.NodeEnvironment; @@ -93,10 +93,10 @@ import static org.elasticsearch.cluster.coordination.CoordinationStateTests.clusterState; import static org.elasticsearch.cluster.coordination.CoordinationStateTests.setValue; import static org.elasticsearch.cluster.coordination.CoordinationStateTests.value; -import static org.elasticsearch.cluster.coordination.Coordinator.PUBLISH_TIMEOUT_SETTING; import static org.elasticsearch.cluster.coordination.Coordinator.Mode.CANDIDATE; import static org.elasticsearch.cluster.coordination.Coordinator.Mode.FOLLOWER; import static org.elasticsearch.cluster.coordination.Coordinator.Mode.LEADER; +import static org.elasticsearch.cluster.coordination.Coordinator.PUBLISH_TIMEOUT_SETTING; import static org.elasticsearch.cluster.coordination.CoordinatorTests.Cluster.DEFAULT_DELAY_VARIABILITY; import static 
org.elasticsearch.cluster.coordination.ElectionSchedulerFactory.ELECTION_BACK_OFF_TIME_SETTING; import static org.elasticsearch.cluster.coordination.ElectionSchedulerFactory.ELECTION_DURATION_SETTING; @@ -117,7 +117,6 @@ import static org.elasticsearch.transport.TransportService.NOOP_TRANSPORT_INTERCEPTOR; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; -import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; @@ -745,7 +744,7 @@ public void testSettingInitialConfigurationTriggersElection() { assertThat(nodeId + " should have found all peers", foundPeers, hasSize(cluster.size())); } - final ClusterNode bootstrapNode = cluster.getAnyNode(); + final ClusterNode bootstrapNode = cluster.getAnyBootstrappableNode(); bootstrapNode.applyInitialConfiguration(); assertTrue(bootstrapNode.getId() + " has been bootstrapped", bootstrapNode.coordinator.isInitialConfigurationSet()); @@ -775,13 +774,13 @@ public void testCannotSetInitialConfigurationTwice() { public void testCannotSetInitialConfigurationWithoutQuorum() { final Cluster cluster = new Cluster(randomIntBetween(1, 5)); final Coordinator coordinator = cluster.getAnyNode().coordinator; - final VotingConfiguration unknownNodeConfiguration = new VotingConfiguration(Collections.singleton("unknown-node")); + final VotingConfiguration unknownNodeConfiguration = new VotingConfiguration( + Sets.newHashSet(coordinator.getLocalNode().getId(), "unknown-node")); final String exceptionMessage = expectThrows(CoordinationStateRejectedException.class, () -> coordinator.setInitialConfiguration(unknownNodeConfiguration)).getMessage(); assertThat(exceptionMessage, startsWith("not enough nodes discovered to form a quorum in the initial configuration [knownNodes=[")); - assertThat(exceptionMessage, - endsWith("], VotingConfiguration{unknown-node}]")); + assertThat(exceptionMessage, containsString("unknown-node")); assertThat(exceptionMessage, containsString(coordinator.getLocalNode().toString())); // This is VERY BAD: setting a _different_ initial configuration. Yet it works if the first attempt will never be a quorum. 
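The Coordinator hunks above enforce three conditions before an initial configuration is accepted: the local node must be master-eligible, it must be named in the initial voting configuration, and a quorum of that configuration must already have been discovered. Below is a minimal, self-contained Java sketch of that check order; NodeView, validateInitialConfiguration, and the exception messages are hypothetical illustrations for this note, not the actual Coordinator API.

    import java.util.List;
    import java.util.Set;

    final class BootstrapValidationSketch {

        record NodeView(String id, boolean masterEligible) {}

        /** discoveredIds is assumed to include the local node's own id. */
        static void validateInitialConfiguration(NodeView localNode, List<String> discoveredIds, Set<String> votingIds) {
            // 1. Bootstrapping may only happen on a master-eligible node.
            if (localNode.masterEligible() == false) {
                throw new IllegalStateException("this node is not master-eligible");
            }
            // 2. The local node must be part of the initial voting configuration.
            if (votingIds.contains(localNode.id()) == false) {
                throw new IllegalStateException("local node is not part of initial configuration");
            }
            // 3. A strict majority of the voting configuration must have been discovered.
            long votes = votingIds.stream().filter(discoveredIds::contains).count();
            if (votes * 2 <= votingIds.size()) {
                throw new IllegalStateException("not enough nodes discovered to form a quorum");
            }
        }
    }

For example, validateInitialConfiguration(new NodeView("n1", true), List.of("n1", "n2"), Set.of("n1", "n2", "n3")) passes, since two of the three voting nodes are known.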
@@ -789,6 +788,16 @@ public void testCannotSetInitialConfigurationWithoutQuorum() {
         cluster.stabilise();
     }

+    public void testCannotSetInitialConfigurationWithoutLocalNode() {
+        final Cluster cluster = new Cluster(randomIntBetween(1, 5));
+        final Coordinator coordinator = cluster.getAnyNode().coordinator;
+        final VotingConfiguration unknownNodeConfiguration = new VotingConfiguration(Sets.newHashSet("unknown-node"));
+        final String exceptionMessage = expectThrows(CoordinationStateRejectedException.class,
+            () -> coordinator.setInitialConfiguration(unknownNodeConfiguration)).getMessage();
+        assertThat(exceptionMessage,
+            equalTo("local node is not part of initial configuration"));
+    }
+
     public void testDiffBasedPublishing() {
         final Cluster cluster = new Cluster(randomIntBetween(1, 5));
         cluster.runRandomly();
@@ -1331,7 +1340,7 @@ void bootstrapIfNecessary() {
                 assertThat("setting initial configuration may fail with disconnected nodes", disconnectedNodes, empty());
                 assertThat("setting initial configuration may fail with blackholed nodes", blackholedNodes, empty());
                 runFor(defaultMillis(DISCOVERY_FIND_PEERS_INTERVAL_SETTING) * 2, "discovery prior to setting initial configuration");
-                final ClusterNode bootstrapNode = getAnyMasterEligibleNode();
+                final ClusterNode bootstrapNode = getAnyBootstrappableNode();
                 bootstrapNode.applyInitialConfiguration();
             } else {
                 logger.info("setting initial configuration not required");
@@ -1402,8 +1411,10 @@ boolean nodeExists(DiscoveryNode node) {
             return clusterNodes.stream().anyMatch(cn -> cn.getLocalNode().equals(node));
         }

-        ClusterNode getAnyMasterEligibleNode() {
-            return randomFrom(clusterNodes.stream().filter(n -> n.getLocalNode().isMasterNode()).collect(Collectors.toList()));
+        ClusterNode getAnyBootstrappableNode() {
+            return randomFrom(clusterNodes.stream().filter(n -> n.getLocalNode().isMasterNode())
+                .filter(n -> initialConfiguration.getNodeIds().contains(n.getLocalNode().getId()))
+                .collect(Collectors.toList()));
         }

         ClusterNode getAnyNode() {
@@ -1737,8 +1748,14 @@ void applyInitialConfiguration() {
             Stream.generate(() -> BOOTSTRAP_PLACEHOLDER_PREFIX + UUIDs.randomBase64UUID(random()))
                 .limit((Math.max(initialConfiguration.getNodeIds().size(), 2) - 1) / 2)
                 .forEach(nodeIdsWithPlaceholders::add);
-            final VotingConfiguration configurationWithPlaceholders = new VotingConfiguration(new HashSet<>(
-                randomSubsetOf(initialConfiguration.getNodeIds().size(), nodeIdsWithPlaceholders)));
+            final Set<String> nodeIds = new HashSet<>(
+                randomSubsetOf(initialConfiguration.getNodeIds().size(), nodeIdsWithPlaceholders));
+            // the initial configuration should not contain a placeholder for the local node
+            if (initialConfiguration.getNodeIds().contains(localNode.getId()) && nodeIds.contains(localNode.getId()) == false) {
+                nodeIds.remove(nodeIds.iterator().next());
+                nodeIds.add(localNode.getId());
+            }
+            final VotingConfiguration configurationWithPlaceholders = new VotingConfiguration(nodeIds);
             try {
                 coordinator.setInitialConfiguration(configurationWithPlaceholders);
                 logger.info("successfully set initial configuration to {}", configurationWithPlaceholders);

From 12f5b02fd0364de99b5d2d035cc7ffb579b43671 Mon Sep 17 00:00:00 2001
From: Luca Cavanna
Date: Wed, 23 Jan 2019 13:53:37 +0100
Subject: [PATCH 38/39] Streamline skip_unavailable handling (#37672)

This commit moves the collectSearchShards method out of RemoteClusterService
into TransportSearchAction, which currently calls it. RemoteClusterService
used to be used only for cross-cluster search, but it is now also used in
cross-cluster replication, where different APIs are called through the
RemoteClusterAwareClient. There is no reason for the collectSearchShards and
fetchShards methods to be in RemoteClusterService and RemoteClusterConnection
respectively. The search shards API can be called through the
RemoteClusterAwareClient too; the only missing bit is a way to handle
failures based on the skip_unavailable setting for each cluster (currently
only supported in RemoteClusterConnection#fetchShards), which is achieved by
adding an isSkipUnavailable(String clusterAlias) method to
RemoteClusterService.

This change is useful for #32125, as we will very soon need to call the
search API against remote clusters as well, which will be done through the
RemoteClusterAwareClient. In that case we will also need to support
skip_unavailable when calling the search API, so we need some way to handle
the skip_unavailable setting like we currently do for the search_shards call.

Relates to #32125
---
 .../shards/ClusterSearchShardsResponse.java | 4 -
 .../action/search/TransportSearchAction.java | 95 ++++++--
 .../transport/RemoteClusterConnection.java | 62 +-----
 .../transport/RemoteClusterService.java | 65 +-----
 .../search/TransportSearchActionTests.java | 210 +++++++++++++++---
 .../RemoteClusterAwareClientTests.java | 140 ++++++++++++
 .../RemoteClusterConnectionTests.java | 203 +----------------
 .../transport/RemoteClusterServiceTests.java | 209 +++--------------
 8 files changed, 445 insertions(+), 543 deletions(-)
 create mode 100644 server/src/test/java/org/elasticsearch/transport/RemoteClusterAwareClientTests.java

diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java
index 57407bd61fb82..c8889c86c1df7 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java
@@ -29,15 +29,11 @@
 import java.io.IOException;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;

 public class ClusterSearchShardsResponse extends ActionResponse implements ToXContentObject {

-    public static final ClusterSearchShardsResponse EMPTY = new ClusterSearchShardsResponse(new ClusterSearchShardsGroup[0],
-        new DiscoveryNode[0], Collections.emptyMap());
-
     private final ClusterSearchShardsGroup[] groups;
     private final DiscoveryNode[] nodes;
     private final Map<String, AliasFilter> indicesAndFilters;
diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
index 3f03c521df52a..30e030eca7376 100644
--- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
+++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java
@@ -22,10 +22,12 @@
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.OriginalIndices;
 import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup;
+import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest;
 import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse;
 import org.elasticsearch.action.support.ActionFilters;
 import
org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -39,6 +41,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.index.Index; import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.index.shard.ShardId; @@ -50,6 +53,7 @@ import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.transport.RemoteClusterService; +import org.elasticsearch.transport.RemoteTransportException; import org.elasticsearch.transport.Transport; import org.elasticsearch.transport.TransportService; @@ -60,8 +64,11 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiFunction; import java.util.function.Function; import java.util.function.LongSupplier; @@ -195,17 +202,23 @@ protected void doExecute(Task task, SearchRequest searchRequest, ActionListener< executeSearch((SearchTask)task, timeProvider, searchRequest, localIndices, Collections.emptyList(), (clusterName, nodeId) -> null, clusterState, Collections.emptyMap(), listener, SearchResponse.Clusters.EMPTY); } else { - remoteClusterService.collectSearchShards(searchRequest.indicesOptions(), searchRequest.preference(), - searchRequest.routing(), remoteClusterIndices, ActionListener.wrap((searchShardsResponses) -> { - List remoteShardIterators = new ArrayList<>(); - Map remoteAliasFilters = new HashMap<>(); - BiFunction clusterNodeLookup = processRemoteShards(searchShardsResponses, - remoteClusterIndices, remoteShardIterators, remoteAliasFilters); - SearchResponse.Clusters clusters = buildClusters(localIndices, remoteClusterIndices, searchShardsResponses); - executeSearch((SearchTask) task, timeProvider, searchRequest, localIndices, - remoteShardIterators, clusterNodeLookup, clusterState, remoteAliasFilters, listener, - clusters); - }, listener::onFailure)); + AtomicInteger skippedClusters = new AtomicInteger(0); + collectSearchShards(searchRequest.indicesOptions(), searchRequest.preference(), searchRequest.routing(), skippedClusters, + remoteClusterIndices, remoteClusterService, threadPool, + ActionListener.wrap( + searchShardsResponses -> { + List remoteShardIterators = new ArrayList<>(); + Map remoteAliasFilters = new HashMap<>(); + BiFunction clusterNodeLookup = processRemoteShards( + searchShardsResponses, remoteClusterIndices, remoteShardIterators, remoteAliasFilters); + int localClusters = localIndices == null ? 
0 : 1; + int totalClusters = remoteClusterIndices.size() + localClusters; + int successfulClusters = searchShardsResponses.size() + localClusters; + executeSearch((SearchTask) task, timeProvider, searchRequest, localIndices, + remoteShardIterators, clusterNodeLookup, clusterState, remoteAliasFilters, listener, + new SearchResponse.Clusters(totalClusters, successfulClusters, skippedClusters.get())); + }, + listener::onFailure)); } }, listener::onFailure); if (searchRequest.source() == null) { @@ -216,18 +229,56 @@ protected void doExecute(Task task, SearchRequest searchRequest, ActionListener< } } - static SearchResponse.Clusters buildClusters(OriginalIndices localIndices, Map remoteIndices, - Map searchShardsResponses) { - int localClusters = localIndices == null ? 0 : 1; - int totalClusters = remoteIndices.size() + localClusters; - int successfulClusters = localClusters; - for (ClusterSearchShardsResponse searchShardsResponse : searchShardsResponses.values()) { - if (searchShardsResponse != ClusterSearchShardsResponse.EMPTY) { - successfulClusters++; - } + static void collectSearchShards(IndicesOptions indicesOptions, String preference, String routing, AtomicInteger skippedClusters, + Map remoteIndicesByCluster, RemoteClusterService remoteClusterService, + ThreadPool threadPool, ActionListener> listener) { + final CountDown responsesCountDown = new CountDown(remoteIndicesByCluster.size()); + final Map searchShardsResponses = new ConcurrentHashMap<>(); + final AtomicReference transportException = new AtomicReference<>(); + for (Map.Entry entry : remoteIndicesByCluster.entrySet()) { + final String clusterAlias = entry.getKey(); + boolean skipUnavailable = remoteClusterService.isSkipUnavailable(clusterAlias); + Client clusterClient = remoteClusterService.getRemoteClusterClient(threadPool, clusterAlias); + final String[] indices = entry.getValue().indices(); + ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest(indices) + .indicesOptions(indicesOptions).local(true).preference(preference).routing(routing); + clusterClient.admin().cluster().searchShards(searchShardsRequest, new ActionListener() { + @Override + public void onResponse(ClusterSearchShardsResponse response) { + searchShardsResponses.put(clusterAlias, response); + maybeFinish(); + } + + @Override + public void onFailure(Exception e) { + if (skipUnavailable) { + skippedClusters.incrementAndGet(); + } else { + RemoteTransportException exception = + new RemoteTransportException("error while communicating with remote cluster [" + clusterAlias + "]", e); + if (transportException.compareAndSet(null, exception) == false) { + transportException.accumulateAndGet(exception, (previous, current) -> { + current.addSuppressed(previous); + return current; + }); + } + } + maybeFinish(); + } + + private void maybeFinish() { + if (responsesCountDown.countDown()) { + RemoteTransportException exception = transportException.get(); + if (exception == null) { + listener.onResponse(searchShardsResponses); + } else { + listener.onFailure(transportException.get()); + } + } + } + } + ); } - int skippedClusters = totalClusters - successfulClusters; - return new SearchResponse.Clusters(totalClusters, successfulClusters, skippedClusters); } static BiFunction processRemoteShards(Map searchShardsResponses, diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java index 7ea55925262ff..d7e3de92e4028 100644 --- 
a/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterConnection.java @@ -25,9 +25,6 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsAction; -import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; -import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; @@ -62,7 +59,6 @@ import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.Semaphore; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Predicate; import java.util.function.Supplier; @@ -172,6 +168,13 @@ void updateSkipUnavailable(boolean skipUnavailable) { this.skipUnavailable = skipUnavailable; } + /** + * Returns whether this cluster is configured to be skipped when unavailable + */ + boolean isSkipUnavailable() { + return skipUnavailable; + } + @Override public void onNodeDisconnected(DiscoveryNode node) { boolean remove = connectedNodes.remove(node); @@ -181,31 +184,11 @@ public void onNodeDisconnected(DiscoveryNode node) { } } - /** - * Fetches all shards for the search request from this remote connection. This is used to later run the search on the remote end. - */ - public void fetchSearchShards(ClusterSearchShardsRequest searchRequest, - ActionListener listener) { - - final ActionListener searchShardsListener; - final Consumer onConnectFailure; - if (skipUnavailable) { - onConnectFailure = (exception) -> listener.onResponse(ClusterSearchShardsResponse.EMPTY); - searchShardsListener = ActionListener.wrap(listener::onResponse, (e) -> listener.onResponse(ClusterSearchShardsResponse.EMPTY)); - } else { - onConnectFailure = listener::onFailure; - searchShardsListener = listener; - } - // in case we have no connected nodes we try to connect and if we fail we either notify the listener or not depending on - // the skip_unavailable setting - ensureConnected(ActionListener.wrap((x) -> fetchShardsInternal(searchRequest, searchShardsListener), onConnectFailure)); - } - /** * Ensures that this cluster is connected. If the cluster is connected this operation * will invoke the listener immediately. 
*/ - public void ensureConnected(ActionListener voidActionListener) { + void ensureConnected(ActionListener voidActionListener) { if (connectedNodes.size() == 0) { connectHandler.connect(voidActionListener); } else { @@ -213,35 +196,6 @@ public void ensureConnected(ActionListener voidActionListener) { } } - private void fetchShardsInternal(ClusterSearchShardsRequest searchShardsRequest, - final ActionListener listener) { - final DiscoveryNode node = getAnyConnectedNode(); - Transport.Connection connection = connectionManager.getConnection(node); - transportService.sendRequest(connection, ClusterSearchShardsAction.NAME, searchShardsRequest, TransportRequestOptions.EMPTY, - new TransportResponseHandler() { - - @Override - public ClusterSearchShardsResponse read(StreamInput in) throws IOException { - return new ClusterSearchShardsResponse(in); - } - - @Override - public void handleResponse(ClusterSearchShardsResponse clusterSearchShardsResponse) { - listener.onResponse(clusterSearchShardsResponse); - } - - @Override - public void handleException(TransportException e) { - listener.onFailure(e); - } - - @Override - public String executor() { - return ThreadPool.Names.SEARCH; - } - }); - } - /** * Collects all nodes on the connected cluster and returns / passes a nodeID to {@link DiscoveryNode} lookup function * that returns null if the node ID is not found. diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index d9fcb01df4ce8..7d19b2eebcb1d 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -24,8 +24,6 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.OriginalIndices; -import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; -import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.Client; @@ -50,10 +48,8 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiFunction; import java.util.function.Function; import java.util.function.Predicate; @@ -287,7 +283,7 @@ public Map groupIndices(IndicesOptions indicesOptions, String clusterAlias = entry.getKey(); List originalIndices = entry.getValue(); originalIndicesMap.put(clusterAlias, - new OriginalIndices(originalIndices.toArray(new String[originalIndices.size()]), indicesOptions)); + new OriginalIndices(originalIndices.toArray(new String[0]), indicesOptions)); } } } else { @@ -311,55 +307,6 @@ public Set getRegisteredRemoteClusterNames() { return remoteClusters.keySet(); } - public void collectSearchShards(IndicesOptions indicesOptions, String preference, String routing, - Map remoteIndicesByCluster, - ActionListener> listener) { - final CountDown responsesCountDown = new CountDown(remoteIndicesByCluster.size()); - final Map searchShardsResponses = new ConcurrentHashMap<>(); - final AtomicReference transportException = new AtomicReference<>(); - for (Map.Entry entry : remoteIndicesByCluster.entrySet()) { - final String clusterName = 
entry.getKey(); - RemoteClusterConnection remoteClusterConnection = remoteClusters.get(clusterName); - if (remoteClusterConnection == null) { - throw new IllegalArgumentException("no such remote cluster: " + clusterName); - } - final String[] indices = entry.getValue().indices(); - ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest(indices) - .indicesOptions(indicesOptions).local(true).preference(preference) - .routing(routing); - remoteClusterConnection.fetchSearchShards(searchShardsRequest, - new ActionListener() { - @Override - public void onResponse(ClusterSearchShardsResponse clusterSearchShardsResponse) { - searchShardsResponses.put(clusterName, clusterSearchShardsResponse); - if (responsesCountDown.countDown()) { - RemoteTransportException exception = transportException.get(); - if (exception == null) { - listener.onResponse(searchShardsResponses); - } else { - listener.onFailure(transportException.get()); - } - } - } - - @Override - public void onFailure(Exception e) { - RemoteTransportException exception = - new RemoteTransportException("error while communicating with remote cluster [" + clusterName + "]", e); - if (transportException.compareAndSet(null, exception) == false) { - exception = transportException.accumulateAndGet(exception, (previous, current) -> { - current.addSuppressed(previous); - return current; - }); - } - if (responsesCountDown.countDown()) { - listener.onFailure(exception); - } - } - }); - } - } - /** * Returns a connection to the given node on the given remote cluster * @throws IllegalArgumentException if the remote cluster is unknown @@ -376,6 +323,13 @@ void ensureConnected(String clusterAlias, ActionListener listener) { getRemoteClusterConnection(clusterAlias).ensureConnected(listener); } + /** + * Returns whether the cluster identified by the provided alias is configured to be skipped when unavailable + */ + public boolean isSkipUnavailable(String clusterAlias) { + return getRemoteClusterConnection(clusterAlias).isSkipUnavailable(); + } + public Transport.Connection getConnection(String cluster) { return getRemoteClusterConnection(cluster).getConnection(); } @@ -399,7 +353,7 @@ public void listenForUpdates(ClusterSettings clusterSettings) { clusterSettings.addAffixUpdateConsumer(SEARCH_REMOTE_CLUSTER_SKIP_UNAVAILABLE, this::updateSkipUnavailable, (alias, value) -> {}); } - synchronized void updateSkipUnavailable(String clusterAlias, Boolean skipUnavailable) { + private synchronized void updateSkipUnavailable(String clusterAlias, Boolean skipUnavailable) { RemoteClusterConnection remote = this.remoteClusters.get(clusterAlias); if (remote != null) { remote.updateSkipUnavailable(skipUnavailable); @@ -510,5 +464,4 @@ public Client getRemoteClusterClient(ThreadPool threadPool, String clusterAlias) Collection getConnections() { return remoteClusters.values(); } - } diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java index 16ff4389d7c4a..1b99beee65e81 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java @@ -21,6 +21,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.OriginalIndices; import 
org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup; import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; @@ -38,13 +39,19 @@ import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.TermsQueryBuilder; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.internal.AliasFilter; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.NodeDisconnectedException; +import org.elasticsearch.transport.RemoteClusterConnectionTests; import org.elasticsearch.transport.RemoteClusterService; +import org.elasticsearch.transport.RemoteClusterServiceTests; +import org.elasticsearch.transport.RemoteTransportException; import org.elasticsearch.transport.Transport; +import org.elasticsearch.transport.TransportConnectionListener; import org.elasticsearch.transport.TransportException; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportRequestOptions; @@ -53,13 +60,22 @@ import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiFunction; import java.util.function.Function; import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.awaitLatch; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.startsWith; public class TransportSearchActionTests extends ESTestCase { @@ -304,41 +320,169 @@ public void close() { } } - public void testBuildClusters() { - OriginalIndices localIndices = randomBoolean() ? null : randomOriginalIndices(); - Map remoteIndices = new HashMap<>(); - Map searchShardsResponses = new HashMap<>(); - int numRemoteClusters = randomIntBetween(0, 10); - boolean onlySuccessful = randomBoolean(); - int localClusters = localIndices == null ? 
0 : 1; - int total = numRemoteClusters + localClusters; - int successful = localClusters; - int skipped = 0; - for (int i = 0; i < numRemoteClusters; i++) { - String cluster = randomAlphaOfLengthBetween(5, 10); - remoteIndices.put(cluster, randomOriginalIndices()); - if (onlySuccessful || randomBoolean()) { - //whatever response counts as successful as long as it's not the empty placeholder - searchShardsResponses.put(cluster, new ClusterSearchShardsResponse(null, null, null)); - successful++; - } else { - searchShardsResponses.put(cluster, ClusterSearchShardsResponse.EMPTY); - skipped++; - } - } - SearchResponse.Clusters clusters = TransportSearchAction.buildClusters(localIndices, remoteIndices, searchShardsResponses); - assertEquals(total, clusters.getTotal()); - assertEquals(successful, clusters.getSuccessful()); - assertEquals(skipped, clusters.getSkipped()); + private MockTransportService startTransport(String id, List knownNodes) { + return RemoteClusterConnectionTests.startTransport(id, knownNodes, Version.CURRENT, threadPool); } - private static OriginalIndices randomOriginalIndices() { - int numLocalIndices = randomIntBetween(0, 5); - String[] localIndices = new String[numLocalIndices]; - for (int i = 0; i < numLocalIndices; i++) { - localIndices[i] = randomAlphaOfLengthBetween(3, 10); + public void testCollectSearchShards() throws Exception { + int numClusters = randomIntBetween(2, 10); + MockTransportService[] mockTransportServices = new MockTransportService[numClusters]; + DiscoveryNode[] nodes = new DiscoveryNode[numClusters]; + Map remoteIndicesByCluster = new HashMap<>(); + Settings.Builder builder = Settings.builder(); + for (int i = 0; i < numClusters; i++) { + List knownNodes = new CopyOnWriteArrayList<>(); + MockTransportService remoteSeedTransport = startTransport("node_remote" + i, knownNodes); + mockTransportServices[i] = remoteSeedTransport; + DiscoveryNode remoteSeedNode = remoteSeedTransport.getLocalDiscoNode(); + knownNodes.add(remoteSeedNode); + nodes[i] = remoteSeedNode; + builder.put("cluster.remote.remote" + i + ".seeds", remoteSeedNode.getAddress().toString()); + remoteIndicesByCluster.put("remote" + i, new OriginalIndices(new String[]{"index"}, IndicesOptions.lenientExpandOpen())); + } + Settings settings = builder.build(); + + try { + try (MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null)) { + service.start(); + service.acceptIncomingRequests(); + RemoteClusterService remoteClusterService = service.getRemoteClusterService(); + { + final CountDownLatch latch = new CountDownLatch(1); + AtomicReference> response = new AtomicReference<>(); + AtomicInteger skippedClusters = new AtomicInteger(); + TransportSearchAction.collectSearchShards(IndicesOptions.lenientExpandOpen(), null, null, skippedClusters, + remoteIndicesByCluster, remoteClusterService, threadPool, + new LatchedActionListener<>(ActionListener.wrap(response::set, e -> fail("no failures expected")), latch)); + awaitLatch(latch, 5, TimeUnit.SECONDS); + assertEquals(0, skippedClusters.get()); + assertNotNull(response.get()); + Map map = response.get(); + assertEquals(numClusters, map.size()); + for (int i = 0; i < numClusters; i++) { + String clusterAlias = "remote" + i; + assertTrue(map.containsKey(clusterAlias)); + ClusterSearchShardsResponse shardsResponse = map.get(clusterAlias); + assertEquals(1, shardsResponse.getNodes().length); + } + } + { + final CountDownLatch latch = new CountDownLatch(1); + AtomicReference failure = new 
AtomicReference<>(); + AtomicInteger skippedClusters = new AtomicInteger(0); + TransportSearchAction.collectSearchShards(IndicesOptions.lenientExpandOpen(), "index_not_found", null, skippedClusters, + remoteIndicesByCluster, remoteClusterService, threadPool, + new LatchedActionListener<>(ActionListener.wrap(r -> fail("no response expected"), failure::set), latch)); + awaitLatch(latch, 5, TimeUnit.SECONDS); + assertEquals(0, skippedClusters.get()); + assertNotNull(failure.get()); + assertThat(failure.get(), instanceOf(RemoteTransportException.class)); + RemoteTransportException remoteTransportException = (RemoteTransportException) failure.get(); + assertEquals(RestStatus.NOT_FOUND, remoteTransportException.status()); + } + + int numDisconnectedClusters = randomIntBetween(1, numClusters); + Set disconnectedNodes = new HashSet<>(numDisconnectedClusters); + Set disconnectedNodesIndices = new HashSet<>(numDisconnectedClusters); + while (disconnectedNodes.size() < numDisconnectedClusters) { + int i = randomIntBetween(0, numClusters - 1); + if (disconnectedNodes.add(nodes[i])) { + assertTrue(disconnectedNodesIndices.add(i)); + } + } + + CountDownLatch disconnectedLatch = new CountDownLatch(numDisconnectedClusters); + RemoteClusterServiceTests.addConnectionListener(remoteClusterService, new TransportConnectionListener() { + @Override + public void onNodeDisconnected(DiscoveryNode node) { + if (disconnectedNodes.remove(node)) { + disconnectedLatch.countDown(); + } + } + }); + for (DiscoveryNode disconnectedNode : disconnectedNodes) { + service.addFailToSendNoConnectRule(disconnectedNode.getAddress()); + } + + { + final CountDownLatch latch = new CountDownLatch(1); + AtomicInteger skippedClusters = new AtomicInteger(0); + AtomicReference failure = new AtomicReference<>(); + TransportSearchAction.collectSearchShards(IndicesOptions.lenientExpandOpen(), null, null, skippedClusters, + remoteIndicesByCluster, remoteClusterService, threadPool, + new LatchedActionListener<>(ActionListener.wrap(r -> fail("no response expected"), failure::set), latch)); + awaitLatch(latch, 5, TimeUnit.SECONDS); + assertEquals(0, skippedClusters.get()); + assertNotNull(failure.get()); + assertThat(failure.get(), instanceOf(RemoteTransportException.class)); + assertThat(failure.get().getMessage(), containsString("error while communicating with remote cluster [")); + assertThat(failure.get().getCause(), instanceOf(NodeDisconnectedException.class)); + } + + //setting skip_unavailable to true for all the disconnected clusters will make the request succeed again + for (int i : disconnectedNodesIndices) { + RemoteClusterServiceTests.updateSkipUnavailable(remoteClusterService, "remote" + i, true); + } + + { + final CountDownLatch latch = new CountDownLatch(1); + AtomicInteger skippedClusters = new AtomicInteger(0); + AtomicReference> response = new AtomicReference<>(); + TransportSearchAction.collectSearchShards(IndicesOptions.lenientExpandOpen(), null, null, skippedClusters, + remoteIndicesByCluster, remoteClusterService, threadPool, + new LatchedActionListener<>(ActionListener.wrap(response::set, e -> fail("no failures expected")), latch)); + awaitLatch(latch, 5, TimeUnit.SECONDS); + assertNotNull(response.get()); + Map map = response.get(); + assertEquals(numClusters - disconnectedNodesIndices.size(), map.size()); + assertEquals(skippedClusters.get(), disconnectedNodesIndices.size()); + for (int i = 0; i < numClusters; i++) { + String clusterAlias = "remote" + i; + if (disconnectedNodesIndices.contains(i)) { + 
assertFalse(map.containsKey(clusterAlias)); + } else { + assertNotNull(map.get(clusterAlias)); + } + } + } + + //give transport service enough time to realize that the node is down, and to notify the connection listeners + //so that RemoteClusterConnection is left with no connected nodes, hence it will retry connecting next + assertTrue(disconnectedLatch.await(5, TimeUnit.SECONDS)); + + service.clearAllRules(); + if (randomBoolean()) { + for (int i : disconnectedNodesIndices) { + if (randomBoolean()) { + RemoteClusterServiceTests.updateSkipUnavailable(remoteClusterService, "remote" + i, true); + } + + } + } + { + final CountDownLatch latch = new CountDownLatch(1); + AtomicInteger skippedClusters = new AtomicInteger(0); + AtomicReference> response = new AtomicReference<>(); + TransportSearchAction.collectSearchShards(IndicesOptions.lenientExpandOpen(), null, null, skippedClusters, + remoteIndicesByCluster, remoteClusterService, threadPool, + new LatchedActionListener<>(ActionListener.wrap(response::set, e -> fail("no failures expected")), latch)); + awaitLatch(latch, 5, TimeUnit.SECONDS); + assertEquals(0, skippedClusters.get()); + assertNotNull(response.get()); + Map map = response.get(); + assertEquals(numClusters, map.size()); + for (int i = 0; i < numClusters; i++) { + String clusterAlias = "remote" + i; + assertTrue(map.containsKey(clusterAlias)); + assertNotNull(map.get(clusterAlias)); + } + } + assertEquals(0, service.getConnectionManager().size()); + } + } finally { + for (MockTransportService mockTransportService : mockTransportServices) { + mockTransportService.close(); + } } - return new OriginalIndices(localIndices, IndicesOptions.fromOptions(randomBoolean(), - randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); } } diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterAwareClientTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterAwareClientTests.java new file mode 100644 index 0000000000000..1a6eaff9e5a2e --- /dev/null +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterAwareClientTests.java @@ -0,0 +1,140 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.transport; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.LatchedActionListener; +import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest; +import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +public class RemoteClusterAwareClientTests extends ESTestCase { + + private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + + @Override + public void tearDown() throws Exception { + super.tearDown(); + ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); + } + + private MockTransportService startTransport(String id, List knownNodes) { + return RemoteClusterConnectionTests.startTransport(id, knownNodes, Version.CURRENT, threadPool); + } + + public void testSearchShards() throws Exception { + List knownNodes = new CopyOnWriteArrayList<>(); + try (MockTransportService seedTransport = startTransport("seed_node", knownNodes); + MockTransportService discoverableTransport = startTransport("discoverable_node", knownNodes)) { + knownNodes.add(seedTransport.getLocalDiscoNode()); + knownNodes.add(discoverableTransport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + Settings.Builder builder = Settings.builder(); + builder.putList("cluster.remote.cluster1.seeds", seedTransport.getLocalDiscoNode().getAddress().toString()); + try (MockTransportService service = MockTransportService.createNewService(builder.build(), Version.CURRENT, threadPool, null)) { + service.start(); + service.acceptIncomingRequests(); + + try (RemoteClusterAwareClient client = new RemoteClusterAwareClient(Settings.EMPTY, threadPool, service, "cluster1")) { + SearchRequest request = new SearchRequest("test-index"); + CountDownLatch responseLatch = new CountDownLatch(1); + AtomicReference reference = new AtomicReference<>(); + ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest("test-index") + .indicesOptions(request.indicesOptions()).local(true).preference(request.preference()) + .routing(request.routing()); + client.admin().cluster().searchShards(searchShardsRequest, + new LatchedActionListener<>(ActionListener.wrap(reference::set, e -> fail("no failures expected")), responseLatch)); + responseLatch.await(); + assertNotNull(reference.get()); + ClusterSearchShardsResponse clusterSearchShardsResponse = reference.get(); + assertEquals(knownNodes, Arrays.asList(clusterSearchShardsResponse.getNodes())); + } + } + } + } + + public void testSearchShardsThreadContextHeader() { + List knownNodes = new CopyOnWriteArrayList<>(); + try (MockTransportService seedTransport = startTransport("seed_node", knownNodes); + MockTransportService 
discoverableTransport = startTransport("discoverable_node", knownNodes)) { + knownNodes.add(seedTransport.getLocalDiscoNode()); + knownNodes.add(discoverableTransport.getLocalDiscoNode()); + Collections.shuffle(knownNodes, random()); + Settings.Builder builder = Settings.builder(); + builder.putList("cluster.remote.cluster1.seeds", seedTransport.getLocalDiscoNode().getAddress().toString()); + try (MockTransportService service = MockTransportService.createNewService(builder.build(), Version.CURRENT, threadPool, null)) { + service.start(); + service.acceptIncomingRequests(); + + try (RemoteClusterAwareClient client = new RemoteClusterAwareClient(Settings.EMPTY, threadPool, service, "cluster1")) { + SearchRequest request = new SearchRequest("test-index"); + int numThreads = 10; + ExecutorService executorService = Executors.newFixedThreadPool(numThreads); + for (int i = 0; i < numThreads; i++) { + final String threadId = Integer.toString(i); + executorService.submit(() -> { + ThreadContext threadContext = seedTransport.threadPool.getThreadContext(); + threadContext.putHeader("threadId", threadId); + AtomicReference reference = new AtomicReference<>(); + final ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest("test-index") + .indicesOptions(request.indicesOptions()).local(true).preference(request.preference()) + .routing(request.routing()); + CountDownLatch responseLatch = new CountDownLatch(1); + client.admin().cluster().searchShards(searchShardsRequest, + new LatchedActionListener<>(ActionListener.wrap( + resp -> { + reference.set(resp); + assertEquals(threadId, seedTransport.threadPool.getThreadContext().getHeader("threadId")); + }, + e -> fail("no failures expected")), responseLatch)); + try { + responseLatch.await(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + assertNotNull(reference.get()); + ClusterSearchShardsResponse clusterSearchShardsResponse = reference.get(); + assertEquals(knownNodes, Arrays.asList(clusterSearchShardsResponse.getNodes())); + }); + } + ThreadPool.terminate(executorService, 5, TimeUnit.SECONDS); + } + } + } + } +} diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java index 02e701ed4bc86..308d330d54f61 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterConnectionTests.java @@ -29,7 +29,6 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -43,7 +42,6 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.CancellableThreads; -import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.core.internal.io.IOUtils; @@ -558,7 +556,7 @@ public void run() { } } - private List>> seedNodes(final DiscoveryNode... seedNodes) { + private static List>> seedNodes(final DiscoveryNode... 
seedNodes) { if (seedNodes.length == 0) { return Collections.emptyList(); } else if (seedNodes.length == 1) { @@ -570,205 +568,6 @@ private List>> seedNodes(final DiscoveryNo } } - public void testFetchShards() throws Exception { - List knownNodes = new CopyOnWriteArrayList<>(); - try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT); - MockTransportService discoverableTransport = startTransport("discoverable_node", knownNodes, Version.CURRENT)) { - DiscoveryNode seedNode = seedTransport.getLocalDiscoNode(); - knownNodes.add(seedTransport.getLocalDiscoNode()); - knownNodes.add(discoverableTransport.getLocalDiscoNode()); - Collections.shuffle(knownNodes, random()); - try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { - service.start(); - service.acceptIncomingRequests(); - final List>> seedNodes = seedNodes(seedNode); - try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes, service, Integer.MAX_VALUE, n -> true, null)) { - if (randomBoolean()) { - updateSeedNodes(connection, seedNodes); - } - if (randomBoolean()) { - connection.updateSkipUnavailable(randomBoolean()); - } - SearchRequest request = new SearchRequest("test-index"); - CountDownLatch responseLatch = new CountDownLatch(1); - AtomicReference reference = new AtomicReference<>(); - AtomicReference failReference = new AtomicReference<>(); - ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest("test-index") - .indicesOptions(request.indicesOptions()).local(true).preference(request.preference()) - .routing(request.routing()); - connection.fetchSearchShards(searchShardsRequest, - new LatchedActionListener<>(ActionListener.wrap(reference::set, failReference::set), responseLatch)); - responseLatch.await(); - assertNull(failReference.get()); - assertNotNull(reference.get()); - ClusterSearchShardsResponse clusterSearchShardsResponse = reference.get(); - assertEquals(knownNodes, Arrays.asList(clusterSearchShardsResponse.getNodes())); - assertTrue(connection.assertNoRunningConnections()); - } - } - } - } - - public void testFetchShardsThreadContextHeader() throws Exception { - List knownNodes = new CopyOnWriteArrayList<>(); - try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT); - MockTransportService discoverableTransport = startTransport("discoverable_node", knownNodes, Version.CURRENT)) { - DiscoveryNode seedNode = seedTransport.getLocalDiscoNode(); - knownNodes.add(seedTransport.getLocalDiscoNode()); - knownNodes.add(discoverableTransport.getLocalDiscoNode()); - Collections.shuffle(knownNodes, random()); - try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { - service.start(); - service.acceptIncomingRequests(); - final List>> seedNodes = seedNodes(seedNode); - try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes, service, Integer.MAX_VALUE, n -> true, null)) { - SearchRequest request = new SearchRequest("test-index"); - Thread[] threads = new Thread[10]; - for (int i = 0; i < threads.length; i++) { - final String threadId = Integer.toString(i); - threads[i] = new Thread(() -> { - ThreadContext threadContext = seedTransport.threadPool.getThreadContext(); - threadContext.putHeader("threadId", threadId); - AtomicReference reference = new 
AtomicReference<>(); - AtomicReference failReference = new AtomicReference<>(); - final ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest("test-index") - .indicesOptions(request.indicesOptions()).local(true).preference(request.preference()) - .routing(request.routing()); - CountDownLatch responseLatch = new CountDownLatch(1); - connection.fetchSearchShards(searchShardsRequest, - new LatchedActionListener<>(ActionListener.wrap( - resp -> { - reference.set(resp); - assertEquals(threadId, seedTransport.threadPool.getThreadContext().getHeader("threadId")); - }, - failReference::set), responseLatch)); - try { - responseLatch.await(); - } catch (InterruptedException e) { - throw new RuntimeException(e); - } - assertNull(failReference.get()); - assertNotNull(reference.get()); - ClusterSearchShardsResponse clusterSearchShardsResponse = reference.get(); - assertEquals(knownNodes, Arrays.asList(clusterSearchShardsResponse.getNodes())); - }); - } - for (int i = 0; i < threads.length; i++) { - threads[i].start(); - } - - for (int i = 0; i < threads.length; i++) { - threads[i].join(); - } - assertTrue(connection.assertNoRunningConnections()); - } - } - } - } - - public void testFetchShardsSkipUnavailable() throws Exception { - List knownNodes = new CopyOnWriteArrayList<>(); - try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT)) { - DiscoveryNode seedNode = seedTransport.getLocalDiscoNode(); - knownNodes.add(seedNode); - try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { - service.start(); - service.acceptIncomingRequests(); - try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", - seedNodes(seedNode), service, Integer.MAX_VALUE, n -> true, null)) { - ConnectionManager connectionManager = connection.getConnectionManager(); - - SearchRequest request = new SearchRequest("test-index"); - ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest("test-index") - .indicesOptions(request.indicesOptions()).local(true).preference(request.preference()) - .routing(request.routing()); - { - CountDownLatch responseLatch = new CountDownLatch(1); - AtomicReference reference = new AtomicReference<>(); - AtomicReference failReference = new AtomicReference<>(); - connection.fetchSearchShards(searchShardsRequest, - new LatchedActionListener<>(ActionListener.wrap(reference::set, failReference::set), responseLatch)); - assertTrue(responseLatch.await(10, TimeUnit.SECONDS)); - assertNull(failReference.get()); - assertNotNull(reference.get()); - ClusterSearchShardsResponse response = reference.get(); - assertTrue(response != ClusterSearchShardsResponse.EMPTY); - assertEquals(knownNodes, Arrays.asList(response.getNodes())); - } - - CountDownLatch disconnectedLatch = new CountDownLatch(1); - connectionManager.addListener(new TransportConnectionListener() { - @Override - public void onNodeDisconnected(DiscoveryNode node) { - if (node.equals(seedNode)) { - disconnectedLatch.countDown(); - } - } - }); - - service.addFailToSendNoConnectRule(seedTransport); - - if (randomBoolean()) { - connection.updateSkipUnavailable(false); - } - { - CountDownLatch responseLatch = new CountDownLatch(1); - AtomicReference reference = new AtomicReference<>(); - AtomicReference failReference = new AtomicReference<>(); - connection.fetchSearchShards(searchShardsRequest, - new LatchedActionListener<>(ActionListener.wrap((s) -> { - 
reference.set(s); - }, failReference::set), responseLatch)); - assertTrue(responseLatch.await(10, TimeUnit.SECONDS)); - assertNotNull(failReference.get()); - assertNull(reference.get()); - assertThat(failReference.get(), instanceOf(TransportException.class)); - } - - connection.updateSkipUnavailable(true); - { - CountDownLatch responseLatch = new CountDownLatch(1); - AtomicReference reference = new AtomicReference<>(); - AtomicReference failReference = new AtomicReference<>(); - connection.fetchSearchShards(searchShardsRequest, - new LatchedActionListener<>(ActionListener.wrap(reference::set, failReference::set), responseLatch)); - assertTrue(responseLatch.await(10, TimeUnit.SECONDS)); - assertNull(failReference.get()); - assertNotNull(reference.get()); - ClusterSearchShardsResponse response = reference.get(); - assertTrue(response == ClusterSearchShardsResponse.EMPTY); - } - - //give transport service enough time to realize that the node is down, and to notify the connection listeners - //so that RemoteClusterConnection is left with no connected nodes, hence it will retry connecting next - assertTrue(disconnectedLatch.await(10, TimeUnit.SECONDS)); - - if (randomBoolean()) { - connection.updateSkipUnavailable(false); - } - - service.clearAllRules(); - //check that we reconnect once the node is back up - { - CountDownLatch responseLatch = new CountDownLatch(1); - AtomicReference reference = new AtomicReference<>(); - AtomicReference failReference = new AtomicReference<>(); - connection.fetchSearchShards(searchShardsRequest, - new LatchedActionListener<>(ActionListener.wrap(reference::set, failReference::set), responseLatch)); - assertTrue(responseLatch.await(10, TimeUnit.SECONDS)); - assertNull(failReference.get()); - assertNotNull(reference.get()); - ClusterSearchShardsResponse response = reference.get(); - assertTrue(response != ClusterSearchShardsResponse.EMPTY); - assertEquals(knownNodes, Arrays.asList(response.getNodes())); - } - } - } - } - } - public void testTriggerUpdatesConcurrently() throws IOException, InterruptedException { List knownNodes = new CopyOnWriteArrayList<>(); try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT); diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java index d5671eec21961..60f3ece86bcbe 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java @@ -20,9 +20,7 @@ import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.OriginalIndices; -import org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Strings; @@ -33,7 +31,6 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.core.internal.io.IOUtils; -import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.test.transport.MockTransportService; @@ -60,9 +57,7 @@ import java.util.function.Supplier; import java.util.stream.Collectors; -import static 
org.elasticsearch.test.hamcrest.ElasticsearchAssertions.awaitLatch; import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; @@ -711,172 +706,6 @@ public void onFailure(Exception e) { } } - public void testCollectSearchShards() throws Exception { - int numClusters = randomIntBetween(2, 10); - MockTransportService[] mockTransportServices = new MockTransportService[numClusters]; - DiscoveryNode[] nodes = new DiscoveryNode[numClusters]; - Map remoteIndicesByCluster = new HashMap<>(); - Settings.Builder builder = Settings.builder(); - for (int i = 0; i < numClusters; i++) { - List knownNodes = new CopyOnWriteArrayList<>(); - MockTransportService remoteSeedTransport = startTransport("node_remote" + i, knownNodes, Version.CURRENT); - mockTransportServices[i] = remoteSeedTransport; - DiscoveryNode remoteSeedNode = remoteSeedTransport.getLocalDiscoNode(); - knownNodes.add(remoteSeedNode); - nodes[i] = remoteSeedNode; - builder.put("cluster.remote.remote" + i + ".seeds", remoteSeedNode.getAddress().toString()); - remoteIndicesByCluster.put("remote" + i, new OriginalIndices(new String[]{"index"}, IndicesOptions.lenientExpandOpen())); - } - Settings settings = builder.build(); - - try { - try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) { - service.start(); - service.acceptIncomingRequests(); - try (RemoteClusterService remoteClusterService = new RemoteClusterService(settings, service)) { - assertFalse(remoteClusterService.isCrossClusterSearchEnabled()); - remoteClusterService.initializeRemoteClusters(); - assertTrue(remoteClusterService.isCrossClusterSearchEnabled()); - { - final CountDownLatch latch = new CountDownLatch(1); - AtomicReference> response = new AtomicReference<>(); - AtomicReference failure = new AtomicReference<>(); - remoteClusterService.collectSearchShards(IndicesOptions.lenientExpandOpen(), null, null, remoteIndicesByCluster, - new LatchedActionListener<>(ActionListener.wrap(response::set, failure::set), latch)); - awaitLatch(latch, 5, TimeUnit.SECONDS); - assertNull(failure.get()); - assertNotNull(response.get()); - Map map = response.get(); - assertEquals(numClusters, map.size()); - for (int i = 0; i < numClusters; i++) { - String clusterAlias = "remote" + i; - assertTrue(map.containsKey(clusterAlias)); - ClusterSearchShardsResponse shardsResponse = map.get(clusterAlias); - assertEquals(1, shardsResponse.getNodes().length); - } - } - { - final CountDownLatch latch = new CountDownLatch(1); - AtomicReference> response = new AtomicReference<>(); - AtomicReference failure = new AtomicReference<>(); - remoteClusterService.collectSearchShards(IndicesOptions.lenientExpandOpen(), "index_not_found", - null, remoteIndicesByCluster, - new LatchedActionListener<>(ActionListener.wrap(response::set, failure::set), latch)); - awaitLatch(latch, 5, TimeUnit.SECONDS); - assertNull(response.get()); - assertNotNull(failure.get()); - assertThat(failure.get(), instanceOf(RemoteTransportException.class)); - RemoteTransportException remoteTransportException = (RemoteTransportException) failure.get(); - assertEquals(RestStatus.NOT_FOUND, remoteTransportException.status()); - } - int numDisconnectedClusters = randomIntBetween(1, numClusters); - Set disconnectedNodes = new HashSet<>(numDisconnectedClusters); - Set disconnectedNodesIndices 
-                    while(disconnectedNodes.size() < numDisconnectedClusters) {
-                        int i = randomIntBetween(0, numClusters - 1);
-                        if (disconnectedNodes.add(nodes[i])) {
-                            assertTrue(disconnectedNodesIndices.add(i));
-                        }
-                    }
-
-                    CountDownLatch disconnectedLatch = new CountDownLatch(numDisconnectedClusters);
-                    for (RemoteClusterConnection connection : remoteClusterService.getConnections()) {
-                        connection.getConnectionManager().addListener(new TransportConnectionListener() {
-                            @Override
-                            public void onNodeDisconnected(DiscoveryNode node) {
-                                if (disconnectedNodes.remove(node)) {
-                                    disconnectedLatch.countDown();
-                                }
-                            }
-                        });
-                    }
-
-                    for (DiscoveryNode disconnectedNode : disconnectedNodes) {
-                        service.addFailToSendNoConnectRule(disconnectedNode.getAddress());
-                    }
-
-                    {
-                        final CountDownLatch latch = new CountDownLatch(1);
-                        AtomicReference<Map<String, ClusterSearchShardsResponse>> response = new AtomicReference<>();
-                        AtomicReference<Exception> failure = new AtomicReference<>();
-                        remoteClusterService.collectSearchShards(IndicesOptions.lenientExpandOpen(), null, null, remoteIndicesByCluster,
-                            new LatchedActionListener<>(ActionListener.wrap(response::set, failure::set), latch));
-                        awaitLatch(latch, 5, TimeUnit.SECONDS);
-                        assertNull(response.get());
-                        assertNotNull(failure.get());
-                        assertThat(failure.get(), instanceOf(RemoteTransportException.class));
-                        assertThat(failure.get().getMessage(), containsString("error while communicating with remote cluster ["));
-                        assertThat(failure.get().getCause(), instanceOf(NodeDisconnectedException.class));
-                    }
-
-                    //setting skip_unavailable to true for all the disconnected clusters will make the request succeed again
-                    for (int i : disconnectedNodesIndices) {
-                        remoteClusterService.updateSkipUnavailable("remote" + i, true);
-                    }
-                    {
-                        final CountDownLatch latch = new CountDownLatch(1);
-                        AtomicReference<Map<String, ClusterSearchShardsResponse>> response = new AtomicReference<>();
-                        AtomicReference<Exception> failure = new AtomicReference<>();
-                        remoteClusterService.collectSearchShards(IndicesOptions.lenientExpandOpen(), null, null, remoteIndicesByCluster,
-                            new LatchedActionListener<>(ActionListener.wrap(response::set, failure::set), latch));
-                        awaitLatch(latch, 5, TimeUnit.SECONDS);
-                        assertNull(failure.get());
-                        assertNotNull(response.get());
-                        Map<String, ClusterSearchShardsResponse> map = response.get();
-                        assertEquals(numClusters, map.size());
-                        for (int i = 0; i < numClusters; i++) {
-                            String clusterAlias = "remote" + i;
-                            assertTrue(map.containsKey(clusterAlias));
-                            ClusterSearchShardsResponse shardsResponse = map.get(clusterAlias);
-                            if (disconnectedNodesIndices.contains(i)) {
-                                assertTrue(shardsResponse == ClusterSearchShardsResponse.EMPTY);
-                            } else {
-                                assertTrue(shardsResponse != ClusterSearchShardsResponse.EMPTY);
-                            }
-                        }
-                    }
-
-                    //give transport service enough time to realize that the node is down, and to notify the connection listeners
-                    //so that RemoteClusterConnection is left with no connected nodes, hence it will retry connecting next
-                    assertTrue(disconnectedLatch.await(5, TimeUnit.SECONDS));
-
-                    service.clearAllRules();
-                    if (randomBoolean()) {
-                        for (int i : disconnectedNodesIndices) {
-                            if (randomBoolean()) {
-                                remoteClusterService.updateSkipUnavailable("remote" + i, true);
-                            }
-
-                        }
-                    }
-                    {
-                        final CountDownLatch latch = new CountDownLatch(1);
-                        AtomicReference<Map<String, ClusterSearchShardsResponse>> response = new AtomicReference<>();
-                        AtomicReference<Exception> failure = new AtomicReference<>();
-                        remoteClusterService.collectSearchShards(IndicesOptions.lenientExpandOpen(), null, null, remoteIndicesByCluster,
-                            new LatchedActionListener<>(ActionListener.wrap(response::set, failure::set), latch));
-                        awaitLatch(latch, 5, TimeUnit.SECONDS);
-                        assertNull(failure.get());
-                        assertNotNull(response.get());
-                        Map<String, ClusterSearchShardsResponse> map = response.get();
-                        assertEquals(numClusters, map.size());
-                        for (int i = 0; i < numClusters; i++) {
-                            String clusterAlias = "remote" + i;
-                            assertTrue(map.containsKey(clusterAlias));
-                            ClusterSearchShardsResponse shardsResponse = map.get(clusterAlias);
-                            assertNotSame(ClusterSearchShardsResponse.EMPTY, shardsResponse);
-                        }
-                    }
-                    assertEquals(0, service.getConnectionManager().size());
-                }
-            }
-        } finally {
-            for (MockTransportService mockTransportService : mockTransportServices) {
-                mockTransportService.close();
-            }
-        }
-    }
-
     public void testRemoteClusterSkipIfDisconnectedSetting() {
         {
             Settings settings = Settings.builder()
@@ -1079,7 +908,7 @@ public void testRemoteClusterWithProxy() throws Exception {
         }
     }
 
-    private void updateRemoteCluster(RemoteClusterService service, String clusterAlias, List<String> addresses, String proxyAddress)
+    private static void updateRemoteCluster(RemoteClusterService service, String clusterAlias, List<String> addresses, String proxyAddress)
             throws Exception {
         CountDownLatch latch = new CountDownLatch(1);
         AtomicReference<Exception> exceptionAtomicReference = new AtomicReference<>();
@@ -1093,4 +922,40 @@ private void updateRemoteCluster(RemoteClusterService service, String clusterAli
             throw exceptionAtomicReference.get();
         }
     }
+
+    public static void updateSkipUnavailable(RemoteClusterService service, String clusterAlias, boolean skipUnavailable) {
+        RemoteClusterConnection connection = service.getRemoteClusterConnection(clusterAlias);
+        connection.updateSkipUnavailable(skipUnavailable);
+    }
+
+    public static void addConnectionListener(RemoteClusterService service, TransportConnectionListener listener) {
+        for (RemoteClusterConnection connection : service.getConnections()) {
+            ConnectionManager connectionManager = connection.getConnectionManager();
+            connectionManager.addListener(listener);
+        }
+    }
+
+    public void testSkipUnavailable() {
+        List<DiscoveryNode> knownNodes = new CopyOnWriteArrayList<>();
+        try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT)) {
+            DiscoveryNode seedNode = seedTransport.getLocalDiscoNode();
+            knownNodes.add(seedNode);
+            Settings.Builder builder = Settings.builder();
+            builder.putList("cluster.remote.cluster1.seeds", seedTransport.getLocalDiscoNode().getAddress().toString());
+            try (MockTransportService service = MockTransportService.createNewService(builder.build(), Version.CURRENT, threadPool, null)) {
+                service.start();
+                service.acceptIncomingRequests();
+
+                assertFalse(service.getRemoteClusterService().isSkipUnavailable("cluster1"));
+
+                if (randomBoolean()) {
+                    updateSkipUnavailable(service.getRemoteClusterService(), "cluster1", false);
+                    assertFalse(service.getRemoteClusterService().isSkipUnavailable("cluster1"));
+                }
+
+                updateSkipUnavailable(service.getRemoteClusterService(), "cluster1", true);
+                assertTrue(service.getRemoteClusterService().isSkipUnavailable("cluster1"));
+            }
+        }
+    }
 }
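The two static helpers added above let sibling test classes flip skip_unavailable and observe per-connection events without duplicating the plumbing the removed testCollectSearchShards carried. A minimal sketch of how a caller might drive them, assuming the startTransport(...) helper and threadPool that this test class already provides; the alias "cluster1" and the listener body are illustrative, not part of the patch:

    // Sketch only: exercises the new static helpers from another test method.
    // Assumes startTransport(...) and threadPool as defined in RemoteClusterServiceTests.
    List<DiscoveryNode> knownNodes = new CopyOnWriteArrayList<>();
    try (MockTransportService seed = startTransport("seed_node", knownNodes, Version.CURRENT)) {
        knownNodes.add(seed.getLocalDiscoNode());
        Settings settings = Settings.builder()
            .putList("cluster.remote.cluster1.seeds", seed.getLocalDiscoNode().getAddress().toString())
            .build();
        try (MockTransportService service = MockTransportService.createNewService(settings, Version.CURRENT, threadPool, null)) {
            service.start();
            service.acceptIncomingRequests();
            // Observe disconnects on every remote connection through the new hook.
            CountDownLatch disconnects = new CountDownLatch(1);
            RemoteClusterServiceTests.addConnectionListener(service.getRemoteClusterService(), new TransportConnectionListener() {
                @Override
                public void onNodeDisconnected(DiscoveryNode node) {
                    disconnects.countDown();
                }
            });
            // Toggle skip_unavailable directly, bypassing the cluster settings API.
            RemoteClusterServiceTests.updateSkipUnavailable(service.getRemoteClusterService(), "cluster1", true);
            assertTrue(service.getRemoteClusterService().isSkipUnavailable("cluster1"));
        }
    }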
From 6a5d9d942a7537a17e7ef146c6fd0616dc3a815e Mon Sep 17 00:00:00 2001
From: David Roberts
Date: Wed, 23 Jan 2019 13:50:31 +0000
Subject: [PATCH 39/39] [TEST] Mute MlMappingsUpgradeIT testMappingsUpgrade

Due to https://github.com/elastic/elasticsearch/issues/37763

---
 .../java/org/elasticsearch/upgrades/MlMappingsUpgradeIT.java | 1 +
 1 file changed, 1 insertion(+)

diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlMappingsUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlMappingsUpgradeIT.java
index 5602f14ef2267..ca41afe6c39da 100644
--- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlMappingsUpgradeIT.java
+++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/MlMappingsUpgradeIT.java
@@ -38,6 +38,7 @@ protected Collection<String> templatesToWaitFor() {
      * The purpose of this test is to ensure that when a job is open through a rolling upgrade we upgrade the results
      * index mappings when it is assigned to an upgraded node even if no other ML endpoint is called after the upgrade
      */
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37763")
    public void testMappingsUpgrade() throws Exception {
        switch (CLUSTER_TYPE) {
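For readers unfamiliar with the muting mechanism used above: @AwaitsFix is the Lucene test-framework annotation (nested in org.apache.lucene.util.LuceneTestCase) that makes the runner skip the annotated test while pointing at the tracking issue. A sketch of the pattern on a hypothetical test class; ExampleUpgradeIT and testSomethingBroken are illustrative names, not from this patch:

    import org.apache.lucene.util.LuceneTestCase.AwaitsFix;

    public class ExampleUpgradeIT extends AbstractUpgradeTestCase {
        // Skipped by the runner until the annotation is removed; the bugUrl
        // keeps the mute traceable to the issue that motivated it.
        @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37763")
        public void testSomethingBroken() throws Exception {
        }
    }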