diff --git a/.ci/packer_cache.sh b/.ci/packer_cache.sh index 04511f81281b9..4533213920c3a 100755 --- a/.ci/packer_cache.sh +++ b/.ci/packer_cache.sh @@ -20,5 +20,5 @@ export JAVA_HOME="${HOME}"/.java/${ES_BUILD_JAVA} # We are caching BWC versions too, need these so we can build those export JAVA8_HOME="${HOME}"/.java/java8 export JAVA11_HOME="${HOME}"/.java/java11 -export JAVA12_HOME="${HOME}"/.java/java12 +export JAVA12_HOME="${HOME}"/.java/openjdk12 ./gradlew --parallel clean --scan -Porg.elasticsearch.acceptScanTOS=true -s resolveAllDependencies diff --git a/build.gradle b/build.gradle index 1db42f7efd020..f6c3222a4074a 100644 --- a/build.gradle +++ b/build.gradle @@ -32,7 +32,7 @@ plugins { id 'com.gradle.build-scan' version '2.0.2' id 'base' } -if (properties.get("org.elasticsearch.acceptScanTOS", "false") == "true") { +if (Boolean.valueOf(project.findProperty('org.elasticsearch.acceptScanTOS') ?: "false")) { buildScan { termsOfServiceUrl = 'https://gradle.com/terms-of-service' termsOfServiceAgree = 'yes' @@ -163,7 +163,7 @@ task verifyVersions { */ boolean bwc_tests_enabled = false -final String bwc_tests_disabled_issue = "https://github.com/elastic/elasticsearch/pull/40616" /* place a PR link here when committing bwc changes */ +final String bwc_tests_disabled_issue = "https://github.com/elastic/elasticsearch/pull/40319" /* place a PR link here when committing bwc changes */ if (bwc_tests_enabled == false) { if (bwc_tests_disabled_issue.isEmpty()) { throw new GradleException("bwc_tests_disabled_issue must be set when bwc_tests_enabled == false") diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 9f658c91ab394..be54b2c68f639 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -232,6 +232,11 @@ if (project != rootProject) { if (isLuceneSnapshot) { systemProperty 'test.lucene-snapshot-revision', isLuceneSnapshot[0][1] } + String defaultParallel = System.getProperty('tests.jvms', project.rootProject.ext.defaultParallel) + if (defaultParallel == "auto") { + defaultParallel = Math.max(Runtime.getRuntime().availableProcessors(), 4) + } + maxParallelForks defaultParallel as Integer } check.dependsOn(integTest) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index fb0cd1a41ecaa..75230e27c16c8 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -44,6 +44,7 @@ public class PluginBuildPlugin extends BuildPlugin { public void apply(Project project) { super.apply(project) configureDependencies(project) + // this afterEvaluate must happen before the afterEvaluate added by integTest creation, // so that the file name resolution for installing the plugin will be setup project.afterEvaluate { @@ -69,7 +70,7 @@ public class PluginBuildPlugin extends BuildPlugin { if (isModule) { throw new RuntimeException("Testclusters does not support modules yet"); } else { - project.testClusters.integTestCluster.plugin( + project.testClusters.integTest.plugin( project.file(project.tasks.bundlePlugin.archiveFile) ) } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy index c2fca819ef3e6..8e7dbafb2c2f1 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy +++ 
b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/RestIntegTestTask.groovy @@ -62,13 +62,13 @@ public class RestIntegTestTask extends DefaultTask { clusterConfig = project.extensions.create("${name}Cluster", ClusterConfiguration.class, project) } else { project.testClusters { - integTestCluster { + "$name" { distribution = 'INTEG_TEST' version = project.version javaHome = project.file(project.ext.runtimeJavaHome) } } - runner.useCluster project.testClusters.integTestCluster + runner.useCluster project.testClusters."$name" } // override/add more for rest tests @@ -81,7 +81,7 @@ public class RestIntegTestTask extends DefaultTask { throw new IllegalArgumentException("tests.rest.cluster and tests.cluster must both be null or non-null") } if (usesTestclusters == true) { - ElasticsearchNode node = project.testClusters.integTestCluster + ElasticsearchNode node = project.testClusters."${name}" runner.systemProperty('tests.rest.cluster', {node.allHttpSocketURI.join(",") }) runner.systemProperty('tests.config.dir', {node.getConfigDir()}) runner.systemProperty('tests.cluster', {node.transportPortURI}) @@ -187,6 +187,10 @@ public class RestIntegTestTask extends DefaultTask { clusterInit.mustRunAfter(tasks) } + public void runner(Closure configure) { + project.tasks.getByName("${name}Runner").configure(configure) + } + /** Print out an excerpt of the log from the given node. */ protected static void printLogExcerpt(NodeInfo nodeInfo) { File logFile = new File(nodeInfo.homeDir, "logs/${nodeInfo.clusterName}.log") diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index c43b5f62b7fcc..4bdef1ff6fd30 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -78,7 +78,6 @@ class VagrantTestPlugin implements Plugin { private static final PACKAGING_TEST_CONFIGURATION = 'packagingTest' private static final BATS = 'bats' private static final String BATS_TEST_COMMAND ="cd \$PACKAGING_ARCHIVES && sudo bats --tap \$BATS_TESTS/*.$BATS" - private static final String PLATFORM_TEST_COMMAND ="rm -rf ~/elasticsearch && rsync -r /elasticsearch/ ~/elasticsearch && cd ~/elasticsearch && ./gradlew test integTest" /** Boxes that have been supplied and are available for testing **/ List availableBoxes = [] @@ -388,15 +387,6 @@ class VagrantTestPlugin implements Plugin { } } - private static void createPlatformTestTask(Project project) { - project.tasks.create('platformTest') { - group 'Verification' - description "Test unit and integ tests on different platforms using vagrant. See TESTING.asciidoc for details. This test " + - "is unmaintained." 
- dependsOn 'vagrantCheckVersion' - } - } - private void createBoxListTasks(Project project) { project.tasks.create('listAllBoxes') { group 'Verification' @@ -429,7 +419,6 @@ class VagrantTestPlugin implements Plugin { createSmokeTestTask(project) createPrepareVagrantTestEnvTask(project) createPackagingTestTask(project) - createPlatformTestTask(project) createBoxListTasks(project) } @@ -454,9 +443,6 @@ class VagrantTestPlugin implements Plugin { assert project.tasks.packagingTest != null Task packagingTest = project.tasks.packagingTest - assert project.tasks.platformTest != null - Task platformTest = project.tasks.platformTest - /* * We always use the main project.rootDir as Vagrant's current working directory (VAGRANT_CWD) * so that boxes are not duplicated for every Gradle project that use this VagrantTestPlugin. @@ -610,31 +596,6 @@ class VagrantTestPlugin implements Plugin { packagingTest.dependsOn(javaPackagingTest) } } - - /* - * This test is unmaintained and was created to run on Linux. We won't allow it to run on Windows - * until it's been brought back into maintenance - */ - if (LINUX_BOXES.contains(box)) { - Task platform = project.tasks.create("vagrant${boxTask}#platformTest", VagrantCommandTask) { - command 'ssh' - boxName box - environmentVars vagrantEnvVars - dependsOn up - finalizedBy halt - args '--command', PLATFORM_TEST_COMMAND + " -Dtests.seed=${-> project.testSeed}" - } - TaskExecutionAdapter platformReproListener = createReproListener(project, platform.path) - platform.doFirst { - project.gradle.addListener(platformReproListener) - } - platform.doLast { - project.gradle.removeListener(platformReproListener) - } - if (project.extensions.esvagrant.boxes.contains(box)) { - platformTest.dependsOn(platform) - } - } } } diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java index ae221b31f2c6c..4138131d7a150 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testclusters/ElasticsearchNode.java @@ -26,8 +26,10 @@ import org.gradle.api.logging.Logging; import java.io.BufferedReader; +import java.io.ByteArrayInputStream; import java.io.File; import java.io.IOException; +import java.io.InputStream; import java.io.InputStreamReader; import java.io.UncheckedIOException; import java.net.HttpURLConnection; @@ -39,15 +41,18 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; +import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Predicate; +import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -71,6 +76,10 @@ public class ElasticsearchNode { private final LinkedHashMap> waitConditions; private final List plugins = new ArrayList<>(); + private final Map> settings = new LinkedHashMap<>(); + private final Map> keystoreSettings = new LinkedHashMap<>(); + private final Map> systemProperties = new LinkedHashMap<>(); + private final Map> environment = new LinkedHashMap<>(); private final Path confPathRepo; private final Path configFile; @@ -143,6 +152,55 @@ public void plugin(File 
plugin) { plugin(plugin.toURI()); } + public void keystore(String key, String value) { + addSupplier("Keystore", keystoreSettings, key, value); + } + + public void keystore(String key, Supplier valueSupplier) { + addSupplier("Keystore", keystoreSettings, key, valueSupplier); + } + + public void setting(String key, String value) { + addSupplier("Settings", settings, key, value); + } + + public void setting(String key, Supplier valueSupplier) { + addSupplier("Setting", settings, key, valueSupplier); + } + + public void systemProperty(String key, String value) { + addSupplier("Java System property", systemProperties, key, value); + } + + public void systemProperty(String key, Supplier valueSupplier) { + addSupplier("Java System property", systemProperties, key, valueSupplier); + } + + public void environment(String key, String value) { + addSupplier("Environment variable", environment, key, value); + } + + public void environment(String key, Supplier valueSupplier) { + addSupplier("Environment variable", environment, key, valueSupplier); + } + + private void addSupplier(String name, Map> collector, String key, Supplier valueSupplier) { + requireNonNull(key, name + " key was null when configuring test cluster `" + this + "`"); + requireNonNull(valueSupplier, name + " value supplier was null when configuring test cluster `" + this + "`"); + collector.put(key, valueSupplier); + } + + private void addSupplier(String name, Map> collector, String key, String actualValue) { + requireNonNull(actualValue, name + " value was null when configuring test cluster `" + this + "`"); + addSupplier(name, collector, key, () -> actualValue); + } + + private void checkSuppliers(String name, Map> collector) { + collector.forEach((key, value) -> { + requireNonNull(value.get().toString(), name + " supplied value was null when configuring test cluster `" + this + "`"); + }); + } + public Path getConfigDir() { return configFile.getParent(); } @@ -168,6 +226,8 @@ public File getJavaHome() { return javaHome; } + + private void waitForUri(String description, String uri) { waitConditions.put(description, (node) -> { try { @@ -222,46 +282,79 @@ synchronized void start() { "install", "--batch", plugin.toString()) ); + if (keystoreSettings.isEmpty() == false) { + checkSuppliers("Keystore", keystoreSettings); + runElaticsearchBinScript("elasticsearch-keystore", "create"); + keystoreSettings.forEach((key, value) -> { + runElaticsearchBinScriptWithInput(value.get().toString(), "elasticsearch-keystore", "add", "-x", key); + }); + } + startElasticsearchProcess(); } + private void runElaticsearchBinScriptWithInput(String input, String tool, String... args) { + try (InputStream byteArrayInputStream = new ByteArrayInputStream(input.getBytes(StandardCharsets.UTF_8))) { + services.loggedExec(spec -> { + spec.setEnvironment(getESEnvironment()); + spec.workingDir(workingDir); + spec.executable( + OS.conditionalString() + .onUnix(() -> "./bin/" + tool) + .onWindows(() -> "cmd") + .supply() + ); + spec.args( + OS.>conditional() + .onWindows(() -> { + ArrayList result = new ArrayList<>(); + result.add("/c"); + result.add("bin\\" + tool + ".bat"); + for (String arg : args) { + result.add(arg); + } + return result; + }) + .onUnix(() -> Arrays.asList(args)) + .supply() + ); + spec.setStandardInput(byteArrayInputStream); + + }); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + private void runElaticsearchBinScript(String tool, String... 
args) { - services.loggedExec(spec -> { - spec.setEnvironment(getESEnvironment()); - spec.workingDir(workingDir); - spec.executable( - OS.conditionalString() - .onUnix(() -> "./bin/" + tool) - .onWindows(() -> "cmd") - .supply() - ); - spec.args( - OS.>conditional() - .onWindows(() -> { - ArrayList result = new ArrayList<>(); - result.add("/c"); - result.add("bin\\" + tool + ".bat"); - for (String arg : args) { - result.add(arg); - } - return result; - }) - .onUnix(() -> Arrays.asList(args)) - .supply() - ); - }); + runElaticsearchBinScriptWithInput("", tool, args); } private Map getESEnvironment() { - Map environment= new HashMap<>(); - environment.put("JAVA_HOME", getJavaHome().getAbsolutePath()); - environment.put("ES_PATH_CONF", configFile.getParent().toString()); - environment.put("ES_JAVA_OPTS", "-Xms512m -Xmx512m"); - environment.put("ES_TMPDIR", tmpDir.toString()); + Map defaultEnv = new HashMap<>(); + defaultEnv.put("JAVA_HOME", getJavaHome().getAbsolutePath()); + defaultEnv.put("ES_PATH_CONF", configFile.getParent().toString()); + String systemPropertiesString = ""; + if (systemProperties.isEmpty() == false) { + checkSuppliers("Java System property", systemProperties); + systemPropertiesString = " " + systemProperties.entrySet().stream() + .map(entry -> "-D" + entry.getKey() + "=" + entry.getValue().get()) + .collect(Collectors.joining(" ")); + } + defaultEnv.put("ES_JAVA_OPTS", "-Xms512m -Xmx512m -ea -esa" + systemPropertiesString); + defaultEnv.put("ES_TMPDIR", tmpDir.toString()); // Windows requires this as it defaults to `c:\windows` despite ES_TMPDIR + defaultEnv.put("TMP", tmpDir.toString()); + + Set commonKeys = new HashSet<>(environment.keySet()); + commonKeys.retainAll(defaultEnv.keySet()); + if (commonKeys.isEmpty() == false) { + throw new IllegalStateException("testcluster does not allow setting the following env vars " + commonKeys); + } - environment.put("TMP", tmpDir.toString()); - return environment; + checkSuppliers("Environment variable", environment); + environment.forEach((key, value) -> defaultEnv.put(key, value.get().toString())); + return defaultEnv; } private void startElasticsearchProcess() { @@ -445,37 +538,49 @@ private void syncWithLinks(Path sourceRoot, Path destinationRoot) { } private void createConfiguration() { - LinkedHashMap config = new LinkedHashMap<>(); + LinkedHashMap defaultConfig = new LinkedHashMap<>(); String nodeName = safeName(name); - config.put("cluster.name",nodeName); - config.put("node.name", nodeName); - config.put("path.repo", confPathRepo.toAbsolutePath().toString()); - config.put("path.data", confPathData.toAbsolutePath().toString()); - config.put("path.logs", confPathLogs.toAbsolutePath().toString()); - config.put("path.shared_data", workingDir.resolve("sharedData").toString()); - config.put("node.attr.testattr", "test"); - config.put("node.portsfile", "true"); - config.put("http.port", "0"); - config.put("transport.tcp.port", "0"); + defaultConfig.put("cluster.name",nodeName); + defaultConfig.put("node.name", nodeName); + defaultConfig.put("path.repo", confPathRepo.toAbsolutePath().toString()); + defaultConfig.put("path.data", confPathData.toAbsolutePath().toString()); + defaultConfig.put("path.logs", confPathLogs.toAbsolutePath().toString()); + defaultConfig.put("path.shared_data", workingDir.resolve("sharedData").toString()); + defaultConfig.put("node.attr.testattr", "test"); + defaultConfig.put("node.portsfile", "true"); + defaultConfig.put("http.port", "0"); + defaultConfig.put("transport.tcp.port", "0"); // Default the 
watermarks to absurdly low to prevent the tests from failing on nodes without enough disk space - config.put("cluster.routing.allocation.disk.watermark.low", "1b"); - config.put("cluster.routing.allocation.disk.watermark.high", "1b"); + defaultConfig.put("cluster.routing.allocation.disk.watermark.low", "1b"); + defaultConfig.put("cluster.routing.allocation.disk.watermark.high", "1b"); // increase script compilation limit since tests can rapid-fire script compilations - config.put("script.max_compilations_rate", "2048/1m"); + defaultConfig.put("script.max_compilations_rate", "2048/1m"); if (Version.fromString(version).getMajor() >= 6) { - config.put("cluster.routing.allocation.disk.watermark.flood_stage", "1b"); + defaultConfig.put("cluster.routing.allocation.disk.watermark.flood_stage", "1b"); } if (Version.fromString(version).getMajor() >= 7) { - config.put("cluster.initial_master_nodes", "[" + nodeName + "]"); + defaultConfig.put("cluster.initial_master_nodes", "[" + nodeName + "]"); } + checkSuppliers("Settings", settings); + Map userConfig = settings.entrySet().stream() + .collect(Collectors.toMap(entry -> entry.getKey(), entry -> entry.getValue().get().toString())); + HashSet overriden = new HashSet<>(defaultConfig.keySet()); + overriden.retainAll(userConfig.keySet()); + if (overriden.isEmpty() ==false) { + throw new IllegalArgumentException("Testclusters does not allow the following settings to be changed:" + overriden); + } + try { // We create hard links for the distribution, so we need to remove the config file before writing it // to prevent the changes to reflect across all copies. Files.delete(configFile); Files.write( configFile, - config.entrySet().stream() + Stream.concat( + userConfig.entrySet().stream(), + defaultConfig.entrySet().stream() + ) .map(entry -> entry.getKey() + ": " + entry.getValue()) .collect(Collectors.joining("\n")) .getBytes(StandardCharsets.UTF_8) diff --git a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java index 59cb851974cb5..57f77d6d1a256 100644 --- a/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java +++ b/buildSrc/src/main/java/org/elasticsearch/gradle/testfixtures/TestFixturesPlugin.java @@ -31,7 +31,6 @@ import org.gradle.api.Task; import org.gradle.api.plugins.BasePlugin; import org.gradle.api.plugins.ExtraPropertiesExtension; -import org.gradle.api.tasks.Input; import org.gradle.api.tasks.TaskContainer; import java.lang.reflect.InvocationTargetException; @@ -104,6 +103,7 @@ public void apply(Project project) { "but none could be found so these will be skipped", project.getPath() ); disableTaskByType(tasks, getTaskClass("com.carrotsearch.gradle.junit4.RandomizedTestingTask")); + disableTaskByType(tasks, getTaskClass("org.elasticsearch.gradle.test.RestIntegTestTask")); // conventions are not honored when the tasks are disabled disableTaskByType(tasks, TestingConventionsTasks.class); disableTaskByType(tasks, ComposeUp.class); @@ -122,6 +122,7 @@ public void apply(Project project) { fixtureProject, (name, port) -> setSystemProperty(task, name, port) ); + task.dependsOn(fixtureProject.getTasks().getByName("postProcessFixture")); }) ); @@ -155,7 +156,6 @@ private void configureServiceInfoForTask(Task task, Project fixtureProject, BiCo ); } - @Input public boolean dockerComposeSupported(Project project) { if (OS.current().equals(OS.WINDOWS)) { return false; diff --git 
a/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameClient.java index 5688324643182..b758968f0a98a 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameClient.java @@ -22,6 +22,8 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.core.AcknowledgedResponse; import org.elasticsearch.client.dataframe.DeleteDataFrameTransformRequest; +import org.elasticsearch.client.dataframe.GetDataFrameTransformRequest; +import org.elasticsearch.client.dataframe.GetDataFrameTransformResponse; import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsRequest; import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsResponse; import org.elasticsearch.client.dataframe.PreviewDataFrameTransformRequest; @@ -275,7 +277,7 @@ public StopDataFrameTransformResponse stopDataFrameTransform(StopDataFrameTransf * @param listener Listener to be notified upon request completion */ public void stopDataFrameTransformAsync(StopDataFrameTransformRequest request, RequestOptions options, - ActionListener listener) { + ActionListener listener) { restHighLevelClient.performRequestAsyncAndParseEntity(request, DataFrameRequestConverters::stopDataFrameTransform, options, @@ -283,4 +285,44 @@ public void stopDataFrameTransformAsync(StopDataFrameTransformRequest request, R listener, Collections.emptySet()); } + + /** + * Get one or more data frame transform configurations + *
<p>
+ * For additional info + * see Get Data Frame transform documentation + * + * @param request The get data frame transform request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @return An GetDataFrameTransformResponse containing the requested transforms + * @throws IOException when there is a serialization issue sending the request or receiving the response + */ + public GetDataFrameTransformResponse getDataFrameTransform(GetDataFrameTransformRequest request, RequestOptions options) + throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, + DataFrameRequestConverters::getDataFrameTransform, + options, + GetDataFrameTransformResponse::fromXContent, + Collections.emptySet()); + } + + /** + * Get one or more data frame transform configurations asynchronously and notifies listener on completion + *
<p>
+ * For additional info + * see Get Data Frame transform documentation + * + * @param request The get data frame transform request + * @param options Additional request options (e.g. headers), use {@link RequestOptions#DEFAULT} if nothing needs to be customized + * @param listener Listener to be notified upon request completion + */ + public void getDataFrameTransformAsync(GetDataFrameTransformRequest request, RequestOptions options, + ActionListener listener) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, + DataFrameRequestConverters::getDataFrameTransform, + options, + GetDataFrameTransformResponse::fromXContent, + listener, + Collections.emptySet()); + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameRequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameRequestConverters.java index df1207bb8a77b..309a37fedf8cd 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/DataFrameRequestConverters.java @@ -24,11 +24,13 @@ import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; import org.elasticsearch.client.dataframe.DeleteDataFrameTransformRequest; +import org.elasticsearch.client.dataframe.GetDataFrameTransformRequest; import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsRequest; import org.elasticsearch.client.dataframe.PreviewDataFrameTransformRequest; import org.elasticsearch.client.dataframe.PutDataFrameTransformRequest; import org.elasticsearch.client.dataframe.StartDataFrameTransformRequest; import org.elasticsearch.client.dataframe.StopDataFrameTransformRequest; +import org.elasticsearch.common.Strings; import java.io.IOException; @@ -49,6 +51,21 @@ static Request putDataFrameTransform(PutDataFrameTransformRequest putRequest) th return request; } + static Request getDataFrameTransform(GetDataFrameTransformRequest getRequest) { + String endpoint = new RequestConverters.EndpointBuilder() + .addPathPartAsIs("_data_frame", "transforms") + .addPathPart(Strings.collectionToCommaDelimitedString(getRequest.getId())) + .build(); + Request request = new Request(HttpGet.METHOD_NAME, endpoint); + if (getRequest.getFrom() != null) { + request.addParameter("from", getRequest.getFrom().toString()); + } + if (getRequest.getSize() != null) { + request.addParameter("size", getRequest.getSize().toString()); + } + return request; + } + static Request deleteDataFrameTransform(DeleteDataFrameTransformRequest request) { String endpoint = new RequestConverters.EndpointBuilder() .addPathPartAsIs("_data_frame", "transforms") diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index d0917b8d45461..77eac4a6e2a85 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -511,7 +511,7 @@ public final void bulkAsync(BulkRequest bulkRequest, RequestOptions options, Act */ public final BulkByScrollResponse reindex(ReindexRequest reindexRequest, RequestOptions options) throws IOException { return performRequestAndParseEntity( - reindexRequest, RequestConverters::reindex, options, BulkByScrollResponse::fromXContent, emptySet() + reindexRequest, RequestConverters::reindex, 
options, BulkByScrollResponse::fromXContent, singleton(409) ); } @@ -537,7 +537,7 @@ public final TaskSubmissionResponse submitReindexTask(ReindexRequest reindexRequ */ public final void reindexAsync(ReindexRequest reindexRequest, RequestOptions options, ActionListener listener) { performRequestAsyncAndParseEntity( - reindexRequest, RequestConverters::reindex, options, BulkByScrollResponse::fromXContent, listener, emptySet() + reindexRequest, RequestConverters::reindex, options, BulkByScrollResponse::fromXContent, listener, singleton(409) ); } @@ -551,7 +551,7 @@ public final void reindexAsync(ReindexRequest reindexRequest, RequestOptions opt */ public final BulkByScrollResponse updateByQuery(UpdateByQueryRequest updateByQueryRequest, RequestOptions options) throws IOException { return performRequestAndParseEntity( - updateByQueryRequest, RequestConverters::updateByQuery, options, BulkByScrollResponse::fromXContent, emptySet() + updateByQueryRequest, RequestConverters::updateByQuery, options, BulkByScrollResponse::fromXContent, singleton(409) ); } @@ -566,7 +566,7 @@ public final BulkByScrollResponse updateByQuery(UpdateByQueryRequest updateByQue public final void updateByQueryAsync(UpdateByQueryRequest updateByQueryRequest, RequestOptions options, ActionListener listener) { performRequestAsyncAndParseEntity( - updateByQueryRequest, RequestConverters::updateByQuery, options, BulkByScrollResponse::fromXContent, listener, emptySet() + updateByQueryRequest, RequestConverters::updateByQuery, options, BulkByScrollResponse::fromXContent, listener, singleton(409) ); } @@ -580,7 +580,7 @@ public final void updateByQueryAsync(UpdateByQueryRequest updateByQueryRequest, */ public final BulkByScrollResponse deleteByQuery(DeleteByQueryRequest deleteByQueryRequest, RequestOptions options) throws IOException { return performRequestAndParseEntity( - deleteByQueryRequest, RequestConverters::deleteByQuery, options, BulkByScrollResponse::fromXContent, emptySet() + deleteByQueryRequest, RequestConverters::deleteByQuery, options, BulkByScrollResponse::fromXContent, singleton(409) ); } @@ -595,7 +595,7 @@ public final BulkByScrollResponse deleteByQuery(DeleteByQueryRequest deleteByQue public final void deleteByQueryAsync(DeleteByQueryRequest deleteByQueryRequest, RequestOptions options, ActionListener listener) { performRequestAsyncAndParseEntity( - deleteByQueryRequest, RequestConverters::deleteByQuery, options, BulkByScrollResponse::fromXContent, listener, emptySet() + deleteByQueryRequest, RequestConverters::deleteByQuery, options, BulkByScrollResponse::fromXContent, listener, singleton(409) ); } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformRequest.java new file mode 100644 index 0000000000000..9577a0f5c72bf --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformRequest.java @@ -0,0 +1,96 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.dataframe; + +import org.elasticsearch.client.Validatable; +import org.elasticsearch.client.ValidationException; + +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +public class GetDataFrameTransformRequest implements Validatable { + + private final List ids; + private Integer from; + private Integer size; + + /** + * Helper method to create a request that will get ALL Data Frame Transforms + * @return new {@link GetDataFrameTransformRequest} object for the id "_all" + */ + public static GetDataFrameTransformRequest getAllDataFrameTransformsRequest() { + return new GetDataFrameTransformRequest("_all"); + } + + public GetDataFrameTransformRequest(String... ids) { + this.ids = Arrays.asList(ids); + } + + public List getId() { + return ids; + } + + public Integer getFrom() { + return from; + } + + public void setFrom(Integer from) { + this.from = from; + } + + public Integer getSize() { + return size; + } + + public void setSize(Integer size) { + this.size = size; + } + + @Override + public Optional validate() { + if (ids == null || ids.isEmpty()) { + ValidationException validationException = new ValidationException(); + validationException.addValidationError("data frame transform id must not be null"); + return Optional.of(validationException); + } else { + return Optional.empty(); + } + } + + @Override + public int hashCode() { + return Objects.hash(ids); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + + if (obj == null || getClass() != obj.getClass()) { + return false; + } + GetDataFrameTransformRequest other = (GetDataFrameTransformRequest) obj; + return Objects.equals(ids, other.ids); + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformResponse.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformResponse.java new file mode 100644 index 0000000000000..93fc91f08cee1 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/GetDataFrameTransformResponse.java @@ -0,0 +1,142 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.dataframe; + +import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +public class GetDataFrameTransformResponse { + + public static final ParseField TRANSFORMS = new ParseField("transforms"); + public static final ParseField INVALID_TRANSFORMS = new ParseField("invalid_transforms"); + public static final ParseField COUNT = new ParseField("count"); + + @SuppressWarnings("unchecked") + static final ConstructingObjectParser INVALID_TRANSFORMS_PARSER = + new ConstructingObjectParser<>("invalid_transforms", true, args -> new InvalidTransforms((List) args[0])); + + @SuppressWarnings("unchecked") + static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "get_data_frame_transform", true, args -> new GetDataFrameTransformResponse( + (List) args[0], (int) args[1], (InvalidTransforms) args[2])); + static { + // Discard the count field which is the size of the transforms array + INVALID_TRANSFORMS_PARSER.declareInt((a, b) -> {}, COUNT); + INVALID_TRANSFORMS_PARSER.declareStringArray(constructorArg(), TRANSFORMS); + + PARSER.declareObjectArray(constructorArg(), DataFrameTransformConfig.PARSER::apply, TRANSFORMS); + PARSER.declareInt(constructorArg(), COUNT); + PARSER.declareObject(optionalConstructorArg(), INVALID_TRANSFORMS_PARSER::apply, INVALID_TRANSFORMS); + } + + public static GetDataFrameTransformResponse fromXContent(final XContentParser parser) { + return GetDataFrameTransformResponse.PARSER.apply(parser, null); + } + + private List transformConfigurations; + private int count; + private InvalidTransforms invalidTransforms; + + public GetDataFrameTransformResponse(List transformConfigurations, + int count, + @Nullable InvalidTransforms invalidTransforms) { + this.transformConfigurations = transformConfigurations; + this.count = count; + this.invalidTransforms = invalidTransforms; + } + + @Nullable + public InvalidTransforms getInvalidTransforms() { + return invalidTransforms; + } + + public int getCount() { + return count; + } + + public List getTransformConfigurations() { + return transformConfigurations; + } + + @Override + public int hashCode() { + return Objects.hash(transformConfigurations, count, invalidTransforms); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + final GetDataFrameTransformResponse that = (GetDataFrameTransformResponse) other; + return Objects.equals(this.transformConfigurations, that.transformConfigurations) + && Objects.equals(this.count, that.count) + && Objects.equals(this.invalidTransforms, that.invalidTransforms); + } + + static class InvalidTransforms { + private final List transformIds; + + InvalidTransforms(List transformIds) { + this.transformIds = transformIds; + } + + public int getCount() { + return transformIds.size(); + } + + public List getTransformIds() { + return transformIds; + } + + @Override + public int hashCode() { + return Objects.hash(transformIds); + } + + @Override + public 
boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + final InvalidTransforms that = (InvalidTransforms) other; + return Objects.equals(this.transformIds, that.transformIds); + } + } +} diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfig.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfig.java index 0dd648e7e3516..88670a7b36d1f 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfig.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/dataframe/transforms/DataFrameTransformConfig.java @@ -46,7 +46,7 @@ public class DataFrameTransformConfig implements ToXContentObject { private final DestConfig dest; private final PivotConfig pivotConfig; - public static final ConstructingObjectParser PARSER = + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("data_frame_transform", true, (args) -> { String id = (String) args[0]; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreRequest.java index 4d2a000a00c89..3040b8a121cf7 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/graph/GraphExploreRequest.java @@ -108,10 +108,26 @@ public GraphExploreRequest indicesOptions(IndicesOptions indicesOptions) { return this; } + /** + * The document types to execute the explore against. Defaults to be executed against + * all types. + * + * @deprecated Types are in the process of being removed. Instead of using a type, prefer to + * filter on a field on the document. + */ + @Deprecated public String[] types() { return this.types; } + /** + * The document types to execute the explore request against. Defaults to be executed against + * all types. + * + * @deprecated Types are in the process of being removed. Instead of using a type, prefer to + * filter on a field on the document. + */ + @Deprecated public GraphExploreRequest types(String... types) { this.types = types; return this; diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ExpressionRoleMapping.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ExpressionRoleMapping.java index 9cb78dd9c83ec..447c67abe3293 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ExpressionRoleMapping.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/ExpressionRoleMapping.java @@ -29,8 +29,10 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Objects; import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; /** * A representation of a single role-mapping. 
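A quick usage sketch of the get-transform API introduced above (DataFrameClient#getDataFrameTransform, DataFrameRequestConverters#getDataFrameTransform and the new GetDataFrameTransformRequest/Response classes). This is a minimal sketch, not part of the diff: it assumes an already-initialised RestHighLevelClient named "client", that the data frame namespace is reached via client.dataFrame() (that accessor is not shown in this change), and a hypothetical transform id "my-transform":

    import org.elasticsearch.client.RequestOptions;
    import org.elasticsearch.client.dataframe.GetDataFrameTransformRequest;
    import org.elasticsearch.client.dataframe.GetDataFrameTransformResponse;
    import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfig;
    import java.util.List;

    // fetch a single transform by id; GetDataFrameTransformRequest.getAllDataFrameTransformsRequest()
    // builds the equivalent request for the "_all" id
    GetDataFrameTransformRequest request = new GetDataFrameTransformRequest("my-transform");
    request.setFrom(0);   // optional paging, sent as the "from" query parameter
    request.setSize(10);  // optional paging, sent as the "size" query parameter

    GetDataFrameTransformResponse response =
            client.dataFrame().getDataFrameTransform(request, RequestOptions.DEFAULT);
    List<DataFrameTransformConfig> configs = response.getTransformConfigurations();

The asynchronous variant, getDataFrameTransformAsync, takes the same request plus an ActionListener<GetDataFrameTransformResponse> and otherwise behaves the same way.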
@@ -42,13 +44,14 @@ public final class ExpressionRoleMapping { @SuppressWarnings("unchecked") static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("role-mapping", true, - (args, name) -> new ExpressionRoleMapping(name, (RoleMapperExpression) args[0], (List) args[1], - (Map) args[2], (boolean) args[3])); + (args, name) -> new ExpressionRoleMapping(name, (RoleMapperExpression) args[0], (List) args[1], + (List) args[2], (Map) args[3], (boolean) args[4])); static { PARSER.declareField(constructorArg(), (parser, context) -> RoleMapperExpressionParser.fromXContent(parser), Fields.RULES, ObjectParser.ValueType.OBJECT); - PARSER.declareStringArray(constructorArg(), Fields.ROLES); + PARSER.declareStringArray(optionalConstructorArg(), Fields.ROLES); + PARSER.declareObjectArray(optionalConstructorArg(), (parser, ctx) -> TemplateRoleName.fromXContent(parser), Fields.ROLE_TEMPLATES); PARSER.declareField(constructorArg(), XContentParser::map, Fields.METADATA, ObjectParser.ValueType.OBJECT); PARSER.declareBoolean(constructorArg(), Fields.ENABLED); } @@ -56,6 +59,7 @@ public final class ExpressionRoleMapping { private final String name; private final RoleMapperExpression expression; private final List roles; + private final List roleTemplates; private final Map metadata; private final boolean enabled; @@ -70,10 +74,11 @@ public final class ExpressionRoleMapping { * @param enabled a flag when {@code true} signifies the role mapping is active */ public ExpressionRoleMapping(final String name, final RoleMapperExpression expr, final List roles, - final Map metadata, boolean enabled) { + final List templates, final Map metadata, boolean enabled) { this.name = name; this.expression = expr; - this.roles = Collections.unmodifiableList(roles); + this.roles = roles == null ? Collections.emptyList() : Collections.unmodifiableList(roles); + this.roleTemplates = templates == null ? Collections.emptyList() : Collections.unmodifiableList(templates); this.metadata = (metadata == null) ? Collections.emptyMap() : Collections.unmodifiableMap(metadata); this.enabled = enabled; } @@ -90,6 +95,10 @@ public List getRoles() { return roles; } + public List getRoleTemplates() { + return roleTemplates; + } + public Map getMetadata() { return metadata; } @@ -99,53 +108,26 @@ public boolean isEnabled() { } @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + (enabled ? 1231 : 1237); - result = prime * result + ((expression == null) ? 0 : expression.hashCode()); - result = prime * result + ((metadata == null) ? 0 : metadata.hashCode()); - result = prime * result + ((name == null) ? 0 : name.hashCode()); - result = prime * result + ((roles == null) ? 
0 : roles.hashCode()); - return result; + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + final ExpressionRoleMapping that = (ExpressionRoleMapping) o; + return this.enabled == that.enabled && + Objects.equals(this.name, that.name) && + Objects.equals(this.expression, that.expression) && + Objects.equals(this.roles, that.roles) && + Objects.equals(this.roleTemplates, that.roleTemplates) && + Objects.equals(this.metadata, that.metadata); } @Override - public boolean equals(Object obj) { - if (this == obj) - return true; - if (obj == null) - return false; - if (getClass() != obj.getClass()) - return false; - final ExpressionRoleMapping other = (ExpressionRoleMapping) obj; - if (enabled != other.enabled) - return false; - if (expression == null) { - if (other.expression != null) - return false; - } else if (!expression.equals(other.expression)) - return false; - if (metadata == null) { - if (other.metadata != null) - return false; - } else if (!metadata.equals(other.metadata)) - return false; - if (name == null) { - if (other.name != null) - return false; - } else if (!name.equals(other.name)) - return false; - if (roles == null) { - if (other.roles != null) - return false; - } else if (!roles.equals(other.roles)) - return false; - return true; + public int hashCode() { + return Objects.hash(name, expression, roles, roleTemplates, metadata, enabled); } public interface Fields { ParseField ROLES = new ParseField("roles"); + ParseField ROLE_TEMPLATES = new ParseField("role_templates"); ParseField ENABLED = new ParseField("enabled"); ParseField RULES = new ParseField("rules"); ParseField METADATA = new ParseField("metadata"); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/PutRoleMappingRequest.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/PutRoleMappingRequest.java index b8da17da72dad..9a9e0fa62f96b 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/PutRoleMappingRequest.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/PutRoleMappingRequest.java @@ -40,22 +40,34 @@ public final class PutRoleMappingRequest implements Validatable, ToXContentObjec private final String name; private final boolean enabled; private final List roles; + private final List roleTemplates; private final RoleMapperExpression rules; private final Map metadata; private final RefreshPolicy refreshPolicy; + @Deprecated public PutRoleMappingRequest(final String name, final boolean enabled, final List roles, final RoleMapperExpression rules, - @Nullable final Map metadata, @Nullable final RefreshPolicy refreshPolicy) { + @Nullable final Map metadata, @Nullable final RefreshPolicy refreshPolicy) { + this(name, enabled, roles, Collections.emptyList(), rules, metadata, refreshPolicy); + } + + public PutRoleMappingRequest(final String name, final boolean enabled, final List roles, final List templates, + final RoleMapperExpression rules, @Nullable final Map metadata, + @Nullable final RefreshPolicy refreshPolicy) { if (Strings.hasText(name) == false) { throw new IllegalArgumentException("role-mapping name is missing"); } this.name = name; this.enabled = enabled; - if (roles == null || roles.isEmpty()) { - throw new IllegalArgumentException("role-mapping roles are missing"); + this.roles = Collections.unmodifiableList(Objects.requireNonNull(roles, "role-mapping roles cannot be null")); + this.roleTemplates = 
Collections.unmodifiableList(Objects.requireNonNull(templates, "role-mapping role_templates cannot be null")); + if (this.roles.isEmpty() && this.roleTemplates.isEmpty()) { + throw new IllegalArgumentException("in a role-mapping, one of roles or role_templates is required"); + } + if (this.roles.isEmpty() == false && this.roleTemplates.isEmpty() == false) { + throw new IllegalArgumentException("in a role-mapping, cannot specify both roles and role_templates"); } - this.roles = Collections.unmodifiableList(roles); this.rules = Objects.requireNonNull(rules, "role-mapping rules are missing"); this.metadata = (metadata == null) ? Collections.emptyMap() : metadata; this.refreshPolicy = (refreshPolicy == null) ? RefreshPolicy.getDefault() : refreshPolicy; @@ -73,6 +85,10 @@ public List getRoles() { return roles; } + public List getRoleTemplates() { + return roleTemplates; + } + public RoleMapperExpression getRules() { return rules; } @@ -87,7 +103,7 @@ public RefreshPolicy getRefreshPolicy() { @Override public int hashCode() { - return Objects.hash(name, enabled, refreshPolicy, roles, rules, metadata); + return Objects.hash(name, enabled, refreshPolicy, roles, roleTemplates, rules, metadata); } @Override @@ -104,11 +120,12 @@ public boolean equals(Object obj) { final PutRoleMappingRequest other = (PutRoleMappingRequest) obj; return (enabled == other.enabled) && - (refreshPolicy == other.refreshPolicy) && - Objects.equals(name, other.name) && - Objects.equals(roles, other.roles) && - Objects.equals(rules, other.rules) && - Objects.equals(metadata, other.metadata); + (refreshPolicy == other.refreshPolicy) && + Objects.equals(name, other.name) && + Objects.equals(roles, other.roles) && + Objects.equals(roleTemplates, other.roleTemplates) && + Objects.equals(rules, other.rules) && + Objects.equals(metadata, other.metadata); } @Override @@ -116,9 +133,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); builder.field("enabled", enabled); builder.field("roles", roles); + builder.field("role_templates", roleTemplates); builder.field("rules", rules); builder.field("metadata", metadata); return builder.endObject(); } - } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/security/TemplateRoleName.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/TemplateRoleName.java new file mode 100644 index 0000000000000..a6263cee69d19 --- /dev/null +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/security/TemplateRoleName.java @@ -0,0 +1,117 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.security; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentParserUtils; +import org.elasticsearch.common.xcontent.XContentType; + +import java.io.IOException; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * A role name that uses a dynamic template. + */ +public class TemplateRoleName implements ToXContentObject { + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("template-role-name", + true, args -> new TemplateRoleName((String) args[0], (Format) args[1])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), Fields.TEMPLATE); + PARSER.declareField(optionalConstructorArg(), Format::fromXContent, Fields.FORMAT, ObjectParser.ValueType.STRING); + } + private final String template; + private final Format format; + + public TemplateRoleName(String template, Format format) { + this.template = Objects.requireNonNull(template); + this.format = Objects.requireNonNull(format); + } + + public TemplateRoleName(Map template, Format format) throws IOException { + this(Strings.toString(XContentBuilder.builder(XContentType.JSON.xContent()).map(template)), format); + } + + public String getTemplate() { + return template; + } + + public Format getFormat() { + return format; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final TemplateRoleName that = (TemplateRoleName) o; + return Objects.equals(this.template, that.template) && + this.format == that.format; + } + + @Override + public int hashCode() { + return Objects.hash(template, format); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field(Fields.TEMPLATE.getPreferredName(), template) + .field(Fields.FORMAT.getPreferredName(), format.name().toLowerCase(Locale.ROOT)) + .endObject(); + } + + static TemplateRoleName fromXContent(XContentParser parser) throws IOException { + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.currentToken(), parser::getTokenLocation); + return PARSER.parse(parser, null); + } + + + public enum Format { + STRING, JSON; + + private static Format fromXContent(XContentParser parser) throws IOException { + XContentParserUtils.ensureExpectedToken(XContentParser.Token.VALUE_STRING, parser.currentToken(), parser::getTokenLocation); + return Format.valueOf(parser.text().toUpperCase(Locale.ROOT)); + } + } + + public interface Fields { + ParseField TEMPLATE = new ParseField("template"); + ParseField FORMAT = new ParseField("format"); + } + +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java index e2102236cc422..6c161444e2475 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java +++ 
b/client/rest-high-level/src/test/java/org/elasticsearch/client/CrudIT.java @@ -21,12 +21,8 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; -import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; -import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup; import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkProcessor; import org.elasticsearch.action.bulk.BulkRequest; @@ -39,7 +35,6 @@ import org.elasticsearch.action.get.MultiGetResponse; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexResponse; -import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.update.UpdateRequest; import org.elasticsearch.action.update.UpdateResponse; @@ -58,12 +53,6 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.get.GetResult; -import org.elasticsearch.index.query.IdsQueryBuilder; -import org.elasticsearch.index.reindex.BulkByScrollResponse; -import org.elasticsearch.index.reindex.DeleteByQueryAction; -import org.elasticsearch.index.reindex.DeleteByQueryRequest; -import org.elasticsearch.index.reindex.UpdateByQueryAction; -import org.elasticsearch.index.reindex.UpdateByQueryRequest; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.document.RestBulkAction; import org.elasticsearch.rest.action.document.RestDeleteAction; @@ -74,8 +63,6 @@ import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; -import org.elasticsearch.tasks.RawTaskStatus; -import org.elasticsearch.tasks.TaskId; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; import org.joda.time.format.DateTimeFormat; @@ -85,18 +72,12 @@ import java.util.Collections; import java.util.List; import java.util.Map; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import static java.util.Collections.singletonMap; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.instanceOf; -import static org.hamcrest.Matchers.lessThan; public class CrudIT extends ESRestHighLevelClientTestCase { @@ -137,7 +118,7 @@ public void testDelete() throws IOException { ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync)); assertEquals(RestStatus.CONFLICT, exception.status()); - assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[_doc][" + docId + "]: " + + assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[" + docId + "]: " + "version conflict, required seqNo [2], primary term [2]. 
current document has seqNo [3] and primary term [1]]", exception.getMessage()); assertEquals("index", exception.getMetadata("es.index").get(0)); @@ -166,7 +147,7 @@ public void testDelete() throws IOException { execute(deleteRequest, highLevelClient()::delete, highLevelClient()::deleteAsync); }); assertEquals(RestStatus.CONFLICT, exception.status()); - assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[_doc][" + + assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[" + docId + "]: version conflict, current version [12] is higher or equal to the one provided [10]]", exception.getMessage()); assertEquals("index", exception.getMetadata("es.index").get(0)); } @@ -301,7 +282,7 @@ public void testGet() throws IOException { ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(getRequest, highLevelClient()::get, highLevelClient()::getAsync)); assertEquals(RestStatus.CONFLICT, exception.status()); - assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, " + "reason=[_doc][id]: " + + assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, " + "reason=[id]: " + "version conflict, current version [1] is different than the one provided [2]]", exception.getMessage()); assertEquals("index", exception.getMetadata("es.index").get(0)); } @@ -527,7 +508,7 @@ public void testIndex() throws IOException { execute(wrongRequest, highLevelClient()::index, highLevelClient()::indexAsync); }); assertEquals(RestStatus.CONFLICT, exception.status()); - assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[_doc][id]: " + + assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[id]: " + "version conflict, required seqNo [1], primary term [5]. current document has seqNo [2] and primary term [1]]", exception.getMessage()); assertEquals("index", exception.getMetadata("es.index").get(0)); @@ -574,7 +555,7 @@ public void testIndex() throws IOException { }); assertEquals(RestStatus.CONFLICT, exception.status()); - assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[_doc][with_create_op_type]: " + + assertEquals("Elasticsearch exception [type=version_conflict_engine_exception, reason=[with_create_op_type]: " + "version conflict, document already exists (current version [1])]", exception.getMessage()); } } @@ -857,230 +838,6 @@ public void testBulk() throws IOException { validateBulkResponses(nbItems, errors, bulkResponse, bulkRequest); } - private TaskId findTaskToRethrottle(String actionName) throws IOException { - long start = System.nanoTime(); - ListTasksRequest request = new ListTasksRequest(); - request.setActions(actionName); - request.setDetailed(true); - do { - ListTasksResponse list = highLevelClient().tasks().list(request, RequestOptions.DEFAULT); - list.rethrowFailures("Finding tasks to rethrottle"); - assertThat("tasks are left over from the last execution of this test", - list.getTaskGroups(), hasSize(lessThan(2))); - if (0 == list.getTaskGroups().size()) { - // The parent task hasn't started yet - continue; - } - TaskGroup taskGroup = list.getTaskGroups().get(0); - assertThat(taskGroup.getChildTasks(), empty()); - return taskGroup.getTaskInfo().getTaskId(); - } while (System.nanoTime() - start < TimeUnit.SECONDS.toNanos(10)); - throw new AssertionError("Couldn't find tasks to rethrottle. 
Here are the running tasks " + - highLevelClient().tasks().list(request, RequestOptions.DEFAULT)); - } - - public void testUpdateByQuery() throws Exception { - final String sourceIndex = "source1"; - { - // Prepare - Settings settings = Settings.builder() - .put("number_of_shards", 1) - .put("number_of_replicas", 0) - .build(); - createIndex(sourceIndex, settings); - assertEquals( - RestStatus.OK, - highLevelClient().bulk( - new BulkRequest() - .add(new IndexRequest(sourceIndex).id("1") - .source(Collections.singletonMap("foo", 1), XContentType.JSON)) - .add(new IndexRequest(sourceIndex).id("2") - .source(Collections.singletonMap("foo", 2), XContentType.JSON)) - .setRefreshPolicy(RefreshPolicy.IMMEDIATE), - RequestOptions.DEFAULT - ).status() - ); - } - { - // test1: create one doc in dest - UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest(); - updateByQueryRequest.indices(sourceIndex); - updateByQueryRequest.setQuery(new IdsQueryBuilder().addIds("1")); - updateByQueryRequest.setRefresh(true); - BulkByScrollResponse bulkResponse = - execute(updateByQueryRequest, highLevelClient()::updateByQuery, highLevelClient()::updateByQueryAsync); - assertEquals(1, bulkResponse.getTotal()); - assertEquals(1, bulkResponse.getUpdated()); - assertEquals(0, bulkResponse.getNoops()); - assertEquals(0, bulkResponse.getVersionConflicts()); - assertEquals(1, bulkResponse.getBatches()); - assertTrue(bulkResponse.getTook().getMillis() > 0); - assertEquals(1, bulkResponse.getBatches()); - assertEquals(0, bulkResponse.getBulkFailures().size()); - assertEquals(0, bulkResponse.getSearchFailures().size()); - } - { - // test2: update using script - UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest(); - updateByQueryRequest.indices(sourceIndex); - updateByQueryRequest.setScript(new Script("if (ctx._source.foo == 2) ctx._source.foo++;")); - updateByQueryRequest.setRefresh(true); - BulkByScrollResponse bulkResponse = - execute(updateByQueryRequest, highLevelClient()::updateByQuery, highLevelClient()::updateByQueryAsync); - assertEquals(2, bulkResponse.getTotal()); - assertEquals(2, bulkResponse.getUpdated()); - assertEquals(0, bulkResponse.getDeleted()); - assertEquals(0, bulkResponse.getNoops()); - assertEquals(0, bulkResponse.getVersionConflicts()); - assertEquals(1, bulkResponse.getBatches()); - assertTrue(bulkResponse.getTook().getMillis() > 0); - assertEquals(1, bulkResponse.getBatches()); - assertEquals(0, bulkResponse.getBulkFailures().size()); - assertEquals(0, bulkResponse.getSearchFailures().size()); - assertEquals( - 3, - (int) (highLevelClient().get(new GetRequest(sourceIndex, "2"), RequestOptions.DEFAULT) - .getSourceAsMap().get("foo")) - ); - } - { - // test update-by-query rethrottling - UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest(); - updateByQueryRequest.indices(sourceIndex); - updateByQueryRequest.setQuery(new IdsQueryBuilder().addIds("1")); - updateByQueryRequest.setRefresh(true); - - // this following settings are supposed to halt reindexing after first document - updateByQueryRequest.setBatchSize(1); - updateByQueryRequest.setRequestsPerSecond(0.00001f); - final CountDownLatch taskFinished = new CountDownLatch(1); - highLevelClient().updateByQueryAsync(updateByQueryRequest, RequestOptions.DEFAULT, new ActionListener() { - - @Override - public void onResponse(BulkByScrollResponse response) { - taskFinished.countDown(); - } - - @Override - public void onFailure(Exception e) { - fail(e.toString()); - } - }); - - TaskId taskIdToRethrottle = 
findTaskToRethrottle(UpdateByQueryAction.NAME); - float requestsPerSecond = 1000f; - ListTasksResponse response = execute(new RethrottleRequest(taskIdToRethrottle, requestsPerSecond), - highLevelClient()::updateByQueryRethrottle, highLevelClient()::updateByQueryRethrottleAsync); - assertThat(response.getTasks(), hasSize(1)); - assertEquals(taskIdToRethrottle, response.getTasks().get(0).getTaskId()); - assertThat(response.getTasks().get(0).getStatus(), instanceOf(RawTaskStatus.class)); - assertEquals(Float.toString(requestsPerSecond), - ((RawTaskStatus) response.getTasks().get(0).getStatus()).toMap().get("requests_per_second").toString()); - taskFinished.await(2, TimeUnit.SECONDS); - - // any rethrottling after the update-by-query is done performed with the same taskId should result in a failure - response = execute(new RethrottleRequest(taskIdToRethrottle, requestsPerSecond), - highLevelClient()::updateByQueryRethrottle, highLevelClient()::updateByQueryRethrottleAsync); - assertTrue(response.getTasks().isEmpty()); - assertFalse(response.getNodeFailures().isEmpty()); - assertEquals(1, response.getNodeFailures().size()); - assertEquals("Elasticsearch exception [type=resource_not_found_exception, reason=task [" + taskIdToRethrottle + "] is missing]", - response.getNodeFailures().get(0).getCause().getMessage()); - } - } - - public void testDeleteByQuery() throws Exception { - final String sourceIndex = "source1"; - { - // Prepare - Settings settings = Settings.builder() - .put("number_of_shards", 1) - .put("number_of_replicas", 0) - .build(); - createIndex(sourceIndex, settings); - assertEquals( - RestStatus.OK, - highLevelClient().bulk( - new BulkRequest() - .add(new IndexRequest(sourceIndex).id("1") - .source(Collections.singletonMap("foo", 1), XContentType.JSON)) - .add(new IndexRequest(sourceIndex).id("2") - .source(Collections.singletonMap("foo", 2), XContentType.JSON)) - .add(new IndexRequest(sourceIndex).id("3") - .source(Collections.singletonMap("foo", 3), XContentType.JSON)) - .setRefreshPolicy(RefreshPolicy.IMMEDIATE), - RequestOptions.DEFAULT - ).status() - ); - } - { - // test1: delete one doc - DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(); - deleteByQueryRequest.indices(sourceIndex); - deleteByQueryRequest.setQuery(new IdsQueryBuilder().addIds("1")); - deleteByQueryRequest.setRefresh(true); - BulkByScrollResponse bulkResponse = - execute(deleteByQueryRequest, highLevelClient()::deleteByQuery, highLevelClient()::deleteByQueryAsync); - assertEquals(1, bulkResponse.getTotal()); - assertEquals(1, bulkResponse.getDeleted()); - assertEquals(0, bulkResponse.getNoops()); - assertEquals(0, bulkResponse.getVersionConflicts()); - assertEquals(1, bulkResponse.getBatches()); - assertTrue(bulkResponse.getTook().getMillis() > 0); - assertEquals(1, bulkResponse.getBatches()); - assertEquals(0, bulkResponse.getBulkFailures().size()); - assertEquals(0, bulkResponse.getSearchFailures().size()); - assertEquals( - 2, - highLevelClient().search(new SearchRequest(sourceIndex), RequestOptions.DEFAULT).getHits().getTotalHits().value - ); - } - { - // test delete-by-query rethrottling - DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(); - deleteByQueryRequest.indices(sourceIndex); - deleteByQueryRequest.setQuery(new IdsQueryBuilder().addIds("2", "3")); - deleteByQueryRequest.setRefresh(true); - - // this following settings are supposed to halt reindexing after first document - deleteByQueryRequest.setBatchSize(1); - 
deleteByQueryRequest.setRequestsPerSecond(0.00001f); - final CountDownLatch taskFinished = new CountDownLatch(1); - highLevelClient().deleteByQueryAsync(deleteByQueryRequest, RequestOptions.DEFAULT, new ActionListener() { - - @Override - public void onResponse(BulkByScrollResponse response) { - taskFinished.countDown(); - } - - @Override - public void onFailure(Exception e) { - fail(e.toString()); - } - }); - - TaskId taskIdToRethrottle = findTaskToRethrottle(DeleteByQueryAction.NAME); - float requestsPerSecond = 1000f; - ListTasksResponse response = execute(new RethrottleRequest(taskIdToRethrottle, requestsPerSecond), - highLevelClient()::deleteByQueryRethrottle, highLevelClient()::deleteByQueryRethrottleAsync); - assertThat(response.getTasks(), hasSize(1)); - assertEquals(taskIdToRethrottle, response.getTasks().get(0).getTaskId()); - assertThat(response.getTasks().get(0).getStatus(), instanceOf(RawTaskStatus.class)); - assertEquals(Float.toString(requestsPerSecond), - ((RawTaskStatus) response.getTasks().get(0).getStatus()).toMap().get("requests_per_second").toString()); - taskFinished.await(2, TimeUnit.SECONDS); - - // any rethrottling after the delete-by-query is done performed with the same taskId should result in a failure - response = execute(new RethrottleRequest(taskIdToRethrottle, requestsPerSecond), - highLevelClient()::deleteByQueryRethrottle, highLevelClient()::deleteByQueryRethrottleAsync); - assertTrue(response.getTasks().isEmpty()); - assertFalse(response.getNodeFailures().isEmpty()); - assertEquals(1, response.getNodeFailures().size()); - assertEquals("Elasticsearch exception [type=resource_not_found_exception, reason=task [" + taskIdToRethrottle + "] is missing]", - response.getNodeFailures().get(0).getCause().getMessage()); - } - } - public void testBulkProcessorIntegration() throws IOException { int nbItems = randomIntBetween(10, 100); boolean[] errors = new boolean[nbItems]; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameRequestConvertersTests.java index 1301588bf8fff..8c6b1c6045855 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameRequestConvertersTests.java @@ -24,6 +24,7 @@ import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; import org.elasticsearch.client.dataframe.DeleteDataFrameTransformRequest; +import org.elasticsearch.client.dataframe.GetDataFrameTransformRequest; import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsRequest; import org.elasticsearch.client.dataframe.PreviewDataFrameTransformRequest; import org.elasticsearch.client.dataframe.PutDataFrameTransformRequest; @@ -147,4 +148,29 @@ public void testGetDataFrameTransformStats() { assertEquals(HttpGet.METHOD_NAME, request.getMethod()); assertThat(request.getEndpoint(), equalTo("/_data_frame/transforms/foo/_stats")); } + + public void testGetDataFrameTransform() { + GetDataFrameTransformRequest getRequest = new GetDataFrameTransformRequest("bar"); + Request request = DataFrameRequestConverters.getDataFrameTransform(getRequest); + + assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + assertThat(request.getEndpoint(), equalTo("/_data_frame/transforms/bar")); + + assertFalse(request.getParameters().containsKey("from")); + 
assertFalse(request.getParameters().containsKey("size")); + + getRequest.setFrom(0); + getRequest.setSize(10); + request = DataFrameRequestConverters.getDataFrameTransform(getRequest); + assertEquals("0", request.getParameters().get("from")); + assertEquals("10", request.getParameters().get("size")); + } + + public void testGetDataFrameTransform_givenMultipleIds() { + GetDataFrameTransformRequest getRequest = new GetDataFrameTransformRequest("foo", "bar", "baz"); + Request request = DataFrameRequestConverters.getDataFrameTransform(getRequest); + + assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + assertThat(request.getEndpoint(), equalTo("/_data_frame/transforms/foo,bar,baz")); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java index 25b09866e1567..e8724cc071dae 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/DataFrameTransformIT.java @@ -27,6 +27,8 @@ import org.elasticsearch.client.core.AcknowledgedResponse; import org.elasticsearch.client.core.IndexerState; import org.elasticsearch.client.dataframe.DeleteDataFrameTransformRequest; +import org.elasticsearch.client.dataframe.GetDataFrameTransformRequest; +import org.elasticsearch.client.dataframe.GetDataFrameTransformResponse; import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsRequest; import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsResponse; import org.elasticsearch.client.dataframe.PreviewDataFrameTransformRequest; @@ -52,6 +54,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.junit.After; @@ -67,6 +70,7 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; @@ -153,16 +157,8 @@ public void testCreateDelete() throws IOException { String sourceIndex = "transform-source"; createIndex(sourceIndex); - QueryConfig queryConfig = new QueryConfig(new MatchAllQueryBuilder()); - GroupConfig groupConfig = new GroupConfig(Collections.singletonMap("reviewer", new TermsGroupSource("user_id"))); - AggregatorFactories.Builder aggBuilder = new AggregatorFactories.Builder(); - aggBuilder.addAggregator(AggregationBuilders.avg("avg_rating").field("stars")); - AggregationConfig aggConfig = new AggregationConfig(aggBuilder); - PivotConfig pivotConfig = new PivotConfig(groupConfig, aggConfig); - String id = "test-crud"; - DataFrameTransformConfig transform = new DataFrameTransformConfig(id, - new SourceConfig(new String[]{sourceIndex}, queryConfig), new DestConfig("pivot-dest"), pivotConfig); + DataFrameTransformConfig transform = validDataFrameTransformConfig(id, sourceIndex, "pivot-dest"); DataFrameClient client = highLevelClient().dataFrame(); AcknowledgedResponse ack = execute(new PutDataFrameTransformRequest(transform), client::putDataFrameTransform, @@ -180,20 +176,78 @@ public void
testCreateDelete() throws IOException { assertThat(deleteError.getMessage(), containsString("Transform with id [test-crud] could not be found")); } - public void testStartStop() throws IOException { + public void testGetTransform() throws IOException { String sourceIndex = "transform-source"; createIndex(sourceIndex); - QueryConfig queryConfig = new QueryConfig(new MatchAllQueryBuilder()); - GroupConfig groupConfig = new GroupConfig(Collections.singletonMap("reviewer", new TermsGroupSource("user_id"))); - AggregatorFactories.Builder aggBuilder = new AggregatorFactories.Builder(); - aggBuilder.addAggregator(AggregationBuilders.avg("avg_rating").field("stars")); - AggregationConfig aggConfig = new AggregationConfig(aggBuilder); - PivotConfig pivotConfig = new PivotConfig(groupConfig, aggConfig); + String id = "test-get"; + DataFrameTransformConfig transform = validDataFrameTransformConfig(id, sourceIndex, "pivot-dest"); + + DataFrameClient client = highLevelClient().dataFrame(); + AcknowledgedResponse ack = execute(new PutDataFrameTransformRequest(transform), client::putDataFrameTransform, + client::putDataFrameTransformAsync); + assertTrue(ack.isAcknowledged()); + + GetDataFrameTransformRequest getRequest = new GetDataFrameTransformRequest(id); + GetDataFrameTransformResponse getResponse = execute(getRequest, client::getDataFrameTransform, + client::getDataFrameTransformAsync); + assertNull(getResponse.getInvalidTransforms()); + assertThat(getResponse.getTransformConfigurations(), hasSize(1)); + assertEquals(transform, getResponse.getTransformConfigurations().get(0)); + } + + public void testGetAllAndPageTransforms() throws IOException { + String sourceIndex = "transform-source"; + createIndex(sourceIndex); + + DataFrameClient client = highLevelClient().dataFrame(); + + DataFrameTransformConfig transform = validDataFrameTransformConfig("test-get-all-1", sourceIndex, "pivot-dest-1"); + AcknowledgedResponse ack = execute(new PutDataFrameTransformRequest(transform), client::putDataFrameTransform, + client::putDataFrameTransformAsync); + assertTrue(ack.isAcknowledged()); + + transform = validDataFrameTransformConfig("test-get-all-2", sourceIndex, "pivot-dest-2"); + ack = execute(new PutDataFrameTransformRequest(transform), client::putDataFrameTransform, + client::putDataFrameTransformAsync); + assertTrue(ack.isAcknowledged()); + + GetDataFrameTransformRequest getRequest = new GetDataFrameTransformRequest("_all"); + GetDataFrameTransformResponse getResponse = execute(getRequest, client::getDataFrameTransform, + client::getDataFrameTransformAsync); + assertNull(getResponse.getInvalidTransforms()); + assertThat(getResponse.getTransformConfigurations(), hasSize(2)); + assertEquals(transform, getResponse.getTransformConfigurations().get(1)); + + getRequest.setFrom(0); + getRequest.setSize(1); + getResponse = execute(getRequest, client::getDataFrameTransform, + client::getDataFrameTransformAsync); + assertNull(getResponse.getInvalidTransforms()); + assertThat(getResponse.getTransformConfigurations(), hasSize(1)); + + GetDataFrameTransformRequest getMulitple = new GetDataFrameTransformRequest("test-get-all-1", "test-get-all-2"); + getResponse = execute(getMulitple, client::getDataFrameTransform, + client::getDataFrameTransformAsync); + assertNull(getResponse.getInvalidTransforms()); + assertThat(getResponse.getTransformConfigurations(), hasSize(2)); + } + + public void testGetMissingTransform() { + DataFrameClient client = highLevelClient().dataFrame(); + + ElasticsearchStatusException missingError = 
expectThrows(ElasticsearchStatusException.class, + () -> execute(new GetDataFrameTransformRequest("unknown"), client::getDataFrameTransform, + client::getDataFrameTransformAsync)); + assertThat(missingError.status(), equalTo(RestStatus.NOT_FOUND)); + } + + public void testStartStop() throws IOException { + String sourceIndex = "transform-source"; + createIndex(sourceIndex); String id = "test-stop-start"; - DataFrameTransformConfig transform = new DataFrameTransformConfig(id, - new SourceConfig(new String[]{sourceIndex}, queryConfig), new DestConfig("pivot-dest"), pivotConfig); + DataFrameTransformConfig transform = validDataFrameTransformConfig(id, sourceIndex, "pivot-dest"); DataFrameClient client = highLevelClient().dataFrame(); AcknowledgedResponse ack = execute(new PutDataFrameTransformRequest(transform), client::putDataFrameTransform, @@ -226,15 +280,7 @@ public void testPreview() throws IOException { createIndex(sourceIndex); indexData(sourceIndex); - QueryConfig queryConfig = new QueryConfig(new MatchAllQueryBuilder()); - GroupConfig groupConfig = new GroupConfig(Collections.singletonMap("reviewer", new TermsGroupSource("user_id"))); - AggregatorFactories.Builder aggBuilder = new AggregatorFactories.Builder(); - aggBuilder.addAggregator(AggregationBuilders.avg("avg_rating").field("stars")); - AggregationConfig aggConfig = new AggregationConfig(aggBuilder); - PivotConfig pivotConfig = new PivotConfig(groupConfig, aggConfig); - - DataFrameTransformConfig transform = new DataFrameTransformConfig("test-preview", - new SourceConfig(new String[]{sourceIndex}, queryConfig), null, pivotConfig); + DataFrameTransformConfig transform = validDataFrameTransformConfig("test-preview", sourceIndex, null); DataFrameClient client = highLevelClient().dataFrame(); PreviewDataFrameTransformResponse preview = execute(new PreviewDataFrameTransformRequest(transform), @@ -245,11 +291,27 @@ public void testPreview() throws IOException { assertThat(docs, hasSize(2)); Optional> theresa = docs.stream().filter(doc -> "theresa".equals(doc.get("reviewer"))).findFirst(); assertTrue(theresa.isPresent()); - assertEquals(2.5d, (double)theresa.get().get("avg_rating"), 0.01d); + assertEquals(2.5d, (double) theresa.get().get("avg_rating"), 0.01d); Optional> michel = docs.stream().filter(doc -> "michel".equals(doc.get("reviewer"))).findFirst(); assertTrue(michel.isPresent()); - assertEquals(3.6d, (double)michel.get().get("avg_rating"), 0.1d); + assertEquals(3.6d, (double) michel.get().get("avg_rating"), 0.1d); + } + + private DataFrameTransformConfig validDataFrameTransformConfig(String id, String source, String destination) { + QueryConfig queryConfig = new QueryConfig(new MatchAllQueryBuilder()); + GroupConfig groupConfig = new GroupConfig(Collections.singletonMap("reviewer", new TermsGroupSource("user_id"))); + AggregatorFactories.Builder aggBuilder = new AggregatorFactories.Builder(); + aggBuilder.addAggregator(AggregationBuilders.avg("avg_rating").field("stars")); + AggregationConfig aggConfig = new AggregationConfig(aggBuilder); + PivotConfig pivotConfig = new PivotConfig(groupConfig, aggConfig); + + DestConfig destConfig = (destination != null) ? 
new DestConfig(destination) : null; + + return new DataFrameTransformConfig(id, + new SourceConfig(new String[]{source}, queryConfig), + destConfig, + pivotConfig); } public void testGetStats() throws Exception { diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java index 976ae754d335f..f758156c222a8 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ESRestHighLevelClientTestCase.java @@ -34,6 +34,7 @@ import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ingest.Pipeline; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.rest.ESRestTestCase; import org.junit.AfterClass; import org.junit.Before; @@ -130,7 +131,7 @@ protected interface AsyncMethodNoRequest { private static class HighLevelClient extends RestHighLevelClient { private HighLevelClient(RestClient restClient) { - super(restClient, (client) -> {}, Collections.emptyList()); + super(restClient, (client) -> {}, new SearchModule(Settings.EMPTY, false, Collections.emptyList()).getNamedXContents()); } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java index cfdd29cdfbfbf..73cca7827e73b 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ReindexIT.java @@ -19,23 +19,54 @@ package org.elasticsearch.client; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup; import org.elasticsearch.action.bulk.BulkRequest; +import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.ingest.PutPipelineRequest; +import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.tasks.TaskSubmissionResponse; import org.elasticsearch.common.CheckedRunnable; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.IdsQueryBuilder; import org.elasticsearch.index.reindex.BulkByScrollResponse; +import org.elasticsearch.index.reindex.DeleteByQueryAction; +import org.elasticsearch.index.reindex.DeleteByQueryRequest; import org.elasticsearch.index.reindex.ReindexRequest; +import org.elasticsearch.index.reindex.ScrollableHitSource; +import org.elasticsearch.index.reindex.UpdateByQueryAction; +import org.elasticsearch.index.reindex.UpdateByQueryRequest; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.script.Script; +import org.elasticsearch.tasks.RawTaskStatus; +import org.elasticsearch.tasks.TaskId; import java.io.IOException; import java.util.Collections; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import static 
org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.everyItem; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.lessThan; public class ReindexIT extends ESRestHighLevelClientTestCase { + private static final String CONFLICT_PIPELINE_ID = "conflict_pipeline"; + public void testReindex() throws IOException { final String sourceIndex = "source1"; final String destinationIndex = "dest"; @@ -122,10 +153,338 @@ public void testReindexTask() throws Exception { } } + public void testReindexConflict() throws IOException { + final String sourceIndex = "testreindexconflict_source"; + final String destIndex = "testreindexconflict_dest"; + + final Settings settings = Settings.builder() + .put("number_of_shards", 1) + .put("number_of_replicas", 0) + .build(); + createIndex(sourceIndex, settings); + createIndex(destIndex, settings); + final BulkRequest bulkRequest = new BulkRequest() + .add(new IndexRequest(sourceIndex).id("1").source(Collections.singletonMap("foo", "bar"), XContentType.JSON)) + .add(new IndexRequest(sourceIndex).id("2").source(Collections.singletonMap("foo", "bar"), XContentType.JSON)) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + assertThat(highLevelClient().bulk(bulkRequest, RequestOptions.DEFAULT).status(), equalTo(RestStatus.OK)); + + putConflictPipeline(); + + final ReindexRequest reindexRequest = new ReindexRequest(); + reindexRequest.setSourceIndices(sourceIndex); + reindexRequest.setDestIndex(destIndex); + reindexRequest.setRefresh(true); + reindexRequest.setDestPipeline(CONFLICT_PIPELINE_ID); + final BulkByScrollResponse response = highLevelClient().reindex(reindexRequest, RequestOptions.DEFAULT); + + assertThat(response.getVersionConflicts(), equalTo(2L)); + assertThat(response.getBulkFailures(), empty()); + assertThat(response.getSearchFailures(), hasSize(2)); + assertThat( + response.getSearchFailures().stream().map(ScrollableHitSource.SearchFailure::toString).collect(Collectors.toSet()), + everyItem(containsString("version conflict")) + ); + + assertThat(response.getTotal(), equalTo(2L)); + assertThat(response.getCreated(), equalTo(0L)); + assertThat(response.getUpdated(), equalTo(0L)); + assertThat(response.getDeleted(), equalTo(0L)); + assertThat(response.getNoops(), equalTo(0L)); + assertThat(response.getBatches(), equalTo(1)); + assertTrue(response.getTook().getMillis() > 0); + } + + public void testUpdateByQuery() throws Exception { + final String sourceIndex = "source1"; + { + // Prepare + Settings settings = Settings.builder() + .put("number_of_shards", 1) + .put("number_of_replicas", 0) + .build(); + createIndex(sourceIndex, settings); + assertEquals( + RestStatus.OK, + highLevelClient().bulk( + new BulkRequest() + .add(new IndexRequest(sourceIndex).id("1") + .source(Collections.singletonMap("foo", 1), XContentType.JSON)) + .add(new IndexRequest(sourceIndex).id("2") + .source(Collections.singletonMap("foo", 2), XContentType.JSON)) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), + RequestOptions.DEFAULT + ).status() + ); + } + { + // test1: create one doc in dest + UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest(); + updateByQueryRequest.indices(sourceIndex); + updateByQueryRequest.setQuery(new IdsQueryBuilder().addIds("1")); + 
updateByQueryRequest.setRefresh(true); + BulkByScrollResponse bulkResponse = + execute(updateByQueryRequest, highLevelClient()::updateByQuery, highLevelClient()::updateByQueryAsync); + assertEquals(1, bulkResponse.getTotal()); + assertEquals(1, bulkResponse.getUpdated()); + assertEquals(0, bulkResponse.getNoops()); + assertEquals(0, bulkResponse.getVersionConflicts()); + assertEquals(1, bulkResponse.getBatches()); + assertTrue(bulkResponse.getTook().getMillis() > 0); + assertEquals(1, bulkResponse.getBatches()); + assertEquals(0, bulkResponse.getBulkFailures().size()); + assertEquals(0, bulkResponse.getSearchFailures().size()); + } + { + // test2: update using script + UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest(); + updateByQueryRequest.indices(sourceIndex); + updateByQueryRequest.setScript(new Script("if (ctx._source.foo == 2) ctx._source.foo++;")); + updateByQueryRequest.setRefresh(true); + BulkByScrollResponse bulkResponse = + execute(updateByQueryRequest, highLevelClient()::updateByQuery, highLevelClient()::updateByQueryAsync); + assertEquals(2, bulkResponse.getTotal()); + assertEquals(2, bulkResponse.getUpdated()); + assertEquals(0, bulkResponse.getDeleted()); + assertEquals(0, bulkResponse.getNoops()); + assertEquals(0, bulkResponse.getVersionConflicts()); + assertEquals(1, bulkResponse.getBatches()); + assertTrue(bulkResponse.getTook().getMillis() > 0); + assertEquals(1, bulkResponse.getBatches()); + assertEquals(0, bulkResponse.getBulkFailures().size()); + assertEquals(0, bulkResponse.getSearchFailures().size()); + assertEquals( + 3, + (int) (highLevelClient().get(new GetRequest(sourceIndex, "2"), RequestOptions.DEFAULT) + .getSourceAsMap().get("foo")) + ); + } + { + // test update-by-query rethrottling + UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest(); + updateByQueryRequest.indices(sourceIndex); + updateByQueryRequest.setQuery(new IdsQueryBuilder().addIds("1")); + updateByQueryRequest.setRefresh(true); + + // this following settings are supposed to halt reindexing after first document + updateByQueryRequest.setBatchSize(1); + updateByQueryRequest.setRequestsPerSecond(0.00001f); + final CountDownLatch taskFinished = new CountDownLatch(1); + highLevelClient().updateByQueryAsync(updateByQueryRequest, RequestOptions.DEFAULT, new ActionListener() { + + @Override + public void onResponse(BulkByScrollResponse response) { + taskFinished.countDown(); + } + + @Override + public void onFailure(Exception e) { + fail(e.toString()); + } + }); + + TaskId taskIdToRethrottle = findTaskToRethrottle(UpdateByQueryAction.NAME); + float requestsPerSecond = 1000f; + ListTasksResponse response = execute(new RethrottleRequest(taskIdToRethrottle, requestsPerSecond), + highLevelClient()::updateByQueryRethrottle, highLevelClient()::updateByQueryRethrottleAsync); + assertThat(response.getTasks(), hasSize(1)); + assertEquals(taskIdToRethrottle, response.getTasks().get(0).getTaskId()); + assertThat(response.getTasks().get(0).getStatus(), instanceOf(RawTaskStatus.class)); + assertEquals(Float.toString(requestsPerSecond), + ((RawTaskStatus) response.getTasks().get(0).getStatus()).toMap().get("requests_per_second").toString()); + taskFinished.await(2, TimeUnit.SECONDS); + + // any rethrottling after the update-by-query is done performed with the same taskId should result in a failure + response = execute(new RethrottleRequest(taskIdToRethrottle, requestsPerSecond), + highLevelClient()::updateByQueryRethrottle, highLevelClient()::updateByQueryRethrottleAsync); + 
assertTrue(response.getTasks().isEmpty()); + assertFalse(response.getNodeFailures().isEmpty()); + assertEquals(1, response.getNodeFailures().size()); + assertEquals("Elasticsearch exception [type=resource_not_found_exception, reason=task [" + taskIdToRethrottle + "] is missing]", + response.getNodeFailures().get(0).getCause().getMessage()); + } + } + + public void testUpdateByQueryConflict() throws IOException { + final String index = "testupdatebyqueryconflict"; + + final Settings settings = Settings.builder() + .put("number_of_shards", 1) + .put("number_of_replicas", 0) + .build(); + createIndex(index, settings); + final BulkRequest bulkRequest = new BulkRequest() + .add(new IndexRequest(index).id("1").source(Collections.singletonMap("foo", "bar"), XContentType.JSON)) + .add(new IndexRequest(index).id("2").source(Collections.singletonMap("foo", "bar"), XContentType.JSON)) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + assertThat(highLevelClient().bulk(bulkRequest, RequestOptions.DEFAULT).status(), equalTo(RestStatus.OK)); + + putConflictPipeline(); + + final UpdateByQueryRequest updateByQueryRequest = new UpdateByQueryRequest(); + updateByQueryRequest.indices(index); + updateByQueryRequest.setRefresh(true); + updateByQueryRequest.setPipeline(CONFLICT_PIPELINE_ID); + final BulkByScrollResponse response = highLevelClient().updateByQuery(updateByQueryRequest, RequestOptions.DEFAULT); + + assertThat(response.getVersionConflicts(), equalTo(1L)); + assertThat(response.getBulkFailures(), empty()); + assertThat(response.getSearchFailures(), hasSize(1)); + assertThat( + response.getSearchFailures().stream().map(ScrollableHitSource.SearchFailure::toString).collect(Collectors.toSet()), + everyItem(containsString("version conflict")) + ); + + assertThat(response.getTotal(), equalTo(2L)); + assertThat(response.getCreated(), equalTo(0L)); + assertThat(response.getUpdated(), equalTo(1L)); + assertThat(response.getDeleted(), equalTo(0L)); + assertThat(response.getNoops(), equalTo(0L)); + assertThat(response.getBatches(), equalTo(1)); + assertTrue(response.getTook().getMillis() > 0); + } + + public void testDeleteByQuery() throws Exception { + final String sourceIndex = "source1"; + { + // Prepare + Settings settings = Settings.builder() + .put("number_of_shards", 1) + .put("number_of_replicas", 0) + .build(); + createIndex(sourceIndex, settings); + assertEquals( + RestStatus.OK, + highLevelClient().bulk( + new BulkRequest() + .add(new IndexRequest(sourceIndex).id("1") + .source(Collections.singletonMap("foo", 1), XContentType.JSON)) + .add(new IndexRequest(sourceIndex).id("2") + .source(Collections.singletonMap("foo", 2), XContentType.JSON)) + .add(new IndexRequest(sourceIndex).id("3") + .source(Collections.singletonMap("foo", 3), XContentType.JSON)) + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE), + RequestOptions.DEFAULT + ).status() + ); + } + { + // test1: delete one doc + DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(); + deleteByQueryRequest.indices(sourceIndex); + deleteByQueryRequest.setQuery(new IdsQueryBuilder().addIds("1")); + deleteByQueryRequest.setRefresh(true); + BulkByScrollResponse bulkResponse = + execute(deleteByQueryRequest, highLevelClient()::deleteByQuery, highLevelClient()::deleteByQueryAsync); + assertEquals(1, bulkResponse.getTotal()); + assertEquals(1, bulkResponse.getDeleted()); + assertEquals(0, bulkResponse.getNoops()); + assertEquals(0, bulkResponse.getVersionConflicts()); + assertEquals(1, bulkResponse.getBatches()); + 
assertTrue(bulkResponse.getTook().getMillis() > 0); + assertEquals(1, bulkResponse.getBatches()); + assertEquals(0, bulkResponse.getBulkFailures().size()); + assertEquals(0, bulkResponse.getSearchFailures().size()); + assertEquals( + 2, + highLevelClient().search(new SearchRequest(sourceIndex), RequestOptions.DEFAULT).getHits().getTotalHits().value + ); + } + { + // test delete-by-query rethrottling + DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(); + deleteByQueryRequest.indices(sourceIndex); + deleteByQueryRequest.setQuery(new IdsQueryBuilder().addIds("2", "3")); + deleteByQueryRequest.setRefresh(true); + + // this following settings are supposed to halt reindexing after first document + deleteByQueryRequest.setBatchSize(1); + deleteByQueryRequest.setRequestsPerSecond(0.00001f); + final CountDownLatch taskFinished = new CountDownLatch(1); + highLevelClient().deleteByQueryAsync(deleteByQueryRequest, RequestOptions.DEFAULT, new ActionListener() { + + @Override + public void onResponse(BulkByScrollResponse response) { + taskFinished.countDown(); + } + + @Override + public void onFailure(Exception e) { + fail(e.toString()); + } + }); + + TaskId taskIdToRethrottle = findTaskToRethrottle(DeleteByQueryAction.NAME); + float requestsPerSecond = 1000f; + ListTasksResponse response = execute(new RethrottleRequest(taskIdToRethrottle, requestsPerSecond), + highLevelClient()::deleteByQueryRethrottle, highLevelClient()::deleteByQueryRethrottleAsync); + assertThat(response.getTasks(), hasSize(1)); + assertEquals(taskIdToRethrottle, response.getTasks().get(0).getTaskId()); + assertThat(response.getTasks().get(0).getStatus(), instanceOf(RawTaskStatus.class)); + assertEquals(Float.toString(requestsPerSecond), + ((RawTaskStatus) response.getTasks().get(0).getStatus()).toMap().get("requests_per_second").toString()); + taskFinished.await(2, TimeUnit.SECONDS); + + // any rethrottling after the delete-by-query is done performed with the same taskId should result in a failure + response = execute(new RethrottleRequest(taskIdToRethrottle, requestsPerSecond), + highLevelClient()::deleteByQueryRethrottle, highLevelClient()::deleteByQueryRethrottleAsync); + assertTrue(response.getTasks().isEmpty()); + assertFalse(response.getNodeFailures().isEmpty()); + assertEquals(1, response.getNodeFailures().size()); + assertEquals("Elasticsearch exception [type=resource_not_found_exception, reason=task [" + taskIdToRethrottle + "] is missing]", + response.getNodeFailures().get(0).getCause().getMessage()); + } + } + + private static TaskId findTaskToRethrottle(String actionName) throws IOException { + long start = System.nanoTime(); + ListTasksRequest request = new ListTasksRequest(); + request.setActions(actionName); + request.setDetailed(true); + do { + ListTasksResponse list = highLevelClient().tasks().list(request, RequestOptions.DEFAULT); + list.rethrowFailures("Finding tasks to rethrottle"); + assertThat("tasks are left over from the last execution of this test", + list.getTaskGroups(), hasSize(lessThan(2))); + if (0 == list.getTaskGroups().size()) { + // The parent task hasn't started yet + continue; + } + TaskGroup taskGroup = list.getTaskGroups().get(0); + assertThat(taskGroup.getChildTasks(), empty()); + return taskGroup.getTaskInfo().getTaskId(); + } while (System.nanoTime() - start < TimeUnit.SECONDS.toNanos(10)); + throw new AssertionError("Couldn't find tasks to rethrottle. 
Here are the running tasks " + + highLevelClient().tasks().list(request, RequestOptions.DEFAULT)); + } + static CheckedRunnable checkCompletionStatus(RestClient client, String taskId) { return () -> { Response response = client.performRequest(new Request("GET", "/_tasks/" + taskId)); assertTrue((boolean) entityAsMap(response).get("completed")); }; } + + private void putConflictPipeline() throws IOException { + final XContentBuilder pipelineBuilder = jsonBuilder() + .startObject() + .startArray("processors") + .startObject() + .startObject("set") + .field("field", "_version") + .field("value", 1) + .endObject() + .endObject() + .startObject() + .startObject("set") + .field("field", "_id") + .field("value", "1") + .endObject() + .endObject() + .endArray() + .endObject(); + final PutPipelineRequest putPipelineRequest = new PutPipelineRequest(CONFLICT_PIPELINE_ID, BytesReference.bytes(pipelineBuilder), + pipelineBuilder.contentType()); + assertTrue(highLevelClient().ingest().putPipeline(putPipelineRequest, RequestOptions.DEFAULT).isAcknowledged()); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java index b2c2028d0fbbd..1176cabcc3d9c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SecurityRequestConvertersTests.java @@ -141,8 +141,8 @@ public void testPutRoleMapping() throws IOException { .addExpression(FieldRoleMapperExpression.ofUsername(username)) .addExpression(FieldRoleMapperExpression.ofGroups(groupname)) .build(); - final PutRoleMappingRequest putRoleMappingRequest = new PutRoleMappingRequest(roleMappingName, true, Collections.singletonList( - rolename), rules, null, refreshPolicy); + final PutRoleMappingRequest putRoleMappingRequest = new PutRoleMappingRequest(roleMappingName, true, + Collections.singletonList(rolename), Collections.emptyList(), rules, null, refreshPolicy); final Request request = SecurityRequestConverters.putRoleMapping(putRoleMappingRequest); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/GetDataFrameTransformRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/GetDataFrameTransformRequestTests.java new file mode 100644 index 0000000000000..818eea4520ac4 --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/GetDataFrameTransformRequestTests.java @@ -0,0 +1,32 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.dataframe; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.containsString; + +public class GetDataFrameTransformRequestTests extends ESTestCase { + public void testValidate() { + assertFalse(new GetDataFrameTransformRequest("valid-id").validate().isPresent()); + assertThat(new GetDataFrameTransformRequest(new String[0]).validate().get().getMessage(), + containsString("data frame transform id must not be null")); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/GetDataFrameTransformResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/GetDataFrameTransformResponseTests.java new file mode 100644 index 0000000000000..f7386e936301b --- /dev/null +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/dataframe/GetDataFrameTransformResponseTests.java @@ -0,0 +1,84 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.dataframe; + +import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfig; +import org.elasticsearch.client.dataframe.transforms.DataFrameTransformConfigTests; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import static org.elasticsearch.test.AbstractXContentTestCase.xContentTester; + + +public class GetDataFrameTransformResponseTests extends ESTestCase { + + public void testXContentParser() throws IOException { + xContentTester(this::createParser, + GetDataFrameTransformResponseTests::createTestInstance, + GetDataFrameTransformResponseTests::toXContent, + GetDataFrameTransformResponse::fromXContent) + .supportsUnknownFields(false) + .test(); + } + + private static GetDataFrameTransformResponse createTestInstance() { + int numTransforms = randomIntBetween(0, 3); + List<DataFrameTransformConfig> transforms = new ArrayList<>(); + for (int i=0; i<numTransforms; i++) { + transforms.add(DataFrameTransformConfigTests.randomDataFrameTransformConfig()); + } + GetDataFrameTransformResponse.InvalidTransforms invalidTransforms = null; + if (randomBoolean()) { + List<String> invalidIds = Arrays.asList(generateRandomStringArray(5, 6, false, false)); + invalidTransforms = new GetDataFrameTransformResponse.InvalidTransforms(invalidIds); + } + return new GetDataFrameTransformResponse(transforms, transforms.size() + 10, invalidTransforms); + } + + private static void toXContent(GetDataFrameTransformResponse response, XContentBuilder builder) throws IOException { + builder.startObject(); + { + builder.field("count", response.getCount()); + builder.field("transforms", response.getTransformConfigurations()); + if
(response.getInvalidTransforms() != null) { + builder.startObject("invalid_transforms"); + builder.field("count", response.getInvalidTransforms().getCount()); + builder.field("transforms", response.getInvalidTransforms().getTransformIds()); + builder.endObject(); + } + } + builder.endObject(); + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, Collections.emptyList()); + return new NamedXContentRegistry(searchModule.getNamedXContents()); + } +} diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java index bf4940654effe..b7d6967206c2c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/DataFrameTransformDocumentationIT.java @@ -27,6 +27,8 @@ import org.elasticsearch.client.core.AcknowledgedResponse; import org.elasticsearch.client.core.IndexerState; import org.elasticsearch.client.dataframe.DeleteDataFrameTransformRequest; +import org.elasticsearch.client.dataframe.GetDataFrameTransformRequest; +import org.elasticsearch.client.dataframe.GetDataFrameTransformResponse; import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsRequest; import org.elasticsearch.client.dataframe.GetDataFrameTransformStatsResponse; import org.elasticsearch.client.dataframe.PreviewDataFrameTransformRequest; @@ -178,7 +180,6 @@ public void onFailure(Exception e) { // Replace the empty listener by a blocking listener in test final CountDownLatch latch = new CountDownLatch(1); - ActionListener ackListener = listener; listener = new LatchedActionListener<>(listener, latch); // tag::put-data-frame-transform-execute-async @@ -264,7 +265,6 @@ public void onFailure(Exception e) { // Replace the empty listener by a blocking listener in test final CountDownLatch latch = new CountDownLatch(1); - ActionListener ackListener = listener; listener = new LatchedActionListener<>(listener, latch); StartDataFrameTransformRequest request = new StartDataFrameTransformRequest("mega-transform"); @@ -294,7 +294,6 @@ public void onFailure(Exception e) { // Replace the empty listener by a blocking listener in test final CountDownLatch latch = new CountDownLatch(1); - ActionListener ackListener = listener; listener = new LatchedActionListener<>(listener, latch); StopDataFrameTransformRequest request = new StopDataFrameTransformRequest("mega-transform"); @@ -392,14 +391,14 @@ public void testPreview() throws IOException, InterruptedException { pivotConfig); PreviewDataFrameTransformRequest request = - new PreviewDataFrameTransformRequest(transformConfig); // <3> + new PreviewDataFrameTransformRequest(transformConfig); // <3> // end::preview-data-frame-transform-request { // tag::preview-data-frame-transform-execute PreviewDataFrameTransformResponse response = - client.dataFrame() - .previewDataFrameTransform(request, RequestOptions.DEFAULT); + client.dataFrame() + .previewDataFrameTransform(request, RequestOptions.DEFAULT); // end::preview-data-frame-transform-execute assertNotNull(response.getDocs()); @@ -482,10 +481,83 @@ public void testGetStats() throws IOException, InterruptedException { { // tag::get-data-frame-transform-stats-execute-listener ActionListener listener = - new ActionListener() { + 
new ActionListener() { + @Override + public void onResponse( + GetDataFrameTransformStatsResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::get-data-frame-transform-stats-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::get-data-frame-transform-stats-execute-async + client.dataFrame().getDataFrameTransformStatsAsync( + request, RequestOptions.DEFAULT, listener); // <1> + // end::get-data-frame-transform-stats-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } + + + public void testGetDataFrameTransform() throws IOException, InterruptedException { + createIndex("source-data"); + + QueryConfig queryConfig = new QueryConfig(new MatchAllQueryBuilder()); + GroupConfig groupConfig = new GroupConfig(Collections.singletonMap("reviewer", new TermsGroupSource("user_id"))); + AggregatorFactories.Builder aggBuilder = new AggregatorFactories.Builder(); + aggBuilder.addAggregator(AggregationBuilders.avg("avg_rating").field("stars")); + AggregationConfig aggConfig = new AggregationConfig(aggBuilder); + PivotConfig pivotConfig = new PivotConfig(groupConfig, aggConfig); + + + DataFrameTransformConfig putTransformConfig = new DataFrameTransformConfig("mega-transform", + new SourceConfig(new String[]{"source-data"}, queryConfig), + new DestConfig("pivot-dest"), pivotConfig); + + RestHighLevelClient client = highLevelClient(); + client.dataFrame().putDataFrameTransform(new PutDataFrameTransformRequest(putTransformConfig), RequestOptions.DEFAULT); + transformsToClean.add(putTransformConfig.getId()); + + { + // tag::get-data-frame-transform-request + GetDataFrameTransformRequest request = + new GetDataFrameTransformRequest("mega-transform"); // <1> + // end::get-data-frame-transform-request + + // tag::get-data-frame-transform-request-options + request.setFrom(0); // <1> + request.setSize(100); // <2> + // end::get-data-frame-transform-request-options + + // tag::get-data-frame-transform-execute + GetDataFrameTransformResponse response = + client.dataFrame() + .getDataFrameTransform(request, RequestOptions.DEFAULT); + // end::get-data-frame-transform-execute + + // tag::get-data-frame-transform-response + List transformConfigs = + response.getTransformConfigurations(); + // end::get-data-frame-transform-response + + assertEquals(1, transformConfigs.size()); + } + { + // tag::get-data-frame-transform-execute-listener + ActionListener listener = + new ActionListener() { @Override - public void onResponse( - GetDataFrameTransformStatsResponse response) { + public void onResponse(GetDataFrameTransformResponse response) { // <1> } @@ -494,16 +566,18 @@ public void onFailure(Exception e) { // <2> } }; - // end::get-data-frame-transform-stats-execute-listener + // end::get-data-frame-transform-execute-listener // Replace the empty listener by a blocking listener in test final CountDownLatch latch = new CountDownLatch(1); listener = new LatchedActionListener<>(listener, latch); - // tag::get-data-frame-transform-stats-execute-async - client.dataFrame().getDataFrameTransformStatsAsync( + GetDataFrameTransformRequest request = new GetDataFrameTransformRequest("mega-transform"); + + // tag::get-data-frame-transform-execute-async + client.dataFrame().getDataFrameTransformAsync( request, RequestOptions.DEFAULT, listener); // <1> - // end::get-data-frame-transform-stats-execute-async 
+ // end::get-data-frame-transform-execute-async assertTrue(latch.await(30L, TimeUnit.SECONDS)); } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java index 1afe6382fa5f8..b095ca5a9a0db 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SecurityDocumentationIT.java @@ -75,6 +75,7 @@ import org.elasticsearch.client.security.PutUserRequest; import org.elasticsearch.client.security.PutUserResponse; import org.elasticsearch.client.security.RefreshPolicy; +import org.elasticsearch.client.security.TemplateRoleName; import org.elasticsearch.client.security.support.ApiKey; import org.elasticsearch.client.security.support.CertificateInfo; import org.elasticsearch.client.security.support.expressiondsl.RoleMapperExpression; @@ -94,6 +95,8 @@ import org.elasticsearch.common.util.set.Sets; import org.hamcrest.Matchers; +import javax.crypto.SecretKeyFactory; +import javax.crypto.spec.PBEKeySpec; import java.io.IOException; import java.time.Instant; import java.util.ArrayList; @@ -108,9 +111,6 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import javax.crypto.SecretKeyFactory; -import javax.crypto.spec.PBEKeySpec; - import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; @@ -120,6 +120,7 @@ import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.isIn; +import static org.hamcrest.Matchers.iterableWithSize; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -366,8 +367,8 @@ public void testPutRoleMapping() throws Exception { .addExpression(FieldRoleMapperExpression.ofUsername("*")) .addExpression(FieldRoleMapperExpression.ofGroups("cn=admins,dc=example,dc=com")) .build(); - final PutRoleMappingRequest request = new PutRoleMappingRequest("mapping-example", true, Collections.singletonList("superuser"), - rules, null, RefreshPolicy.NONE); + final PutRoleMappingRequest request = new PutRoleMappingRequest("mapping-example", true, + Collections.singletonList("superuser"), Collections.emptyList(), rules, null, RefreshPolicy.NONE); final PutRoleMappingResponse response = client.security().putRoleMapping(request, RequestOptions.DEFAULT); // end::put-role-mapping-execute // tag::put-role-mapping-response @@ -381,7 +382,8 @@ public void testPutRoleMapping() throws Exception { .addExpression(FieldRoleMapperExpression.ofUsername("*")) .addExpression(FieldRoleMapperExpression.ofGroups("cn=admins,dc=example,dc=com")) .build(); - final PutRoleMappingRequest request = new PutRoleMappingRequest("mapping-example", true, Collections.singletonList("superuser"), + final PutRoleMappingRequest request = new PutRoleMappingRequest("mapping-example", true, Collections.emptyList(), + Collections.singletonList(new TemplateRoleName("{\"source\":\"{{username}}\"}", TemplateRoleName.Format.STRING)), rules, null, RefreshPolicy.NONE); // tag::put-role-mapping-execute-listener ActionListener listener = new ActionListener() { @@ -397,25 +399,32 @@ public void onFailure(Exception e) { }; // end::put-role-mapping-execute-listener + // 
avoid unused local warning + assertNotNull(listener); + // Replace the empty listener by a blocking listener in test - final CountDownLatch latch = new CountDownLatch(1); - listener = new LatchedActionListener<>(listener, latch); + final PlainActionFuture future = new PlainActionFuture<>(); + listener = future; // tag::put-role-mapping-execute-async client.security().putRoleMappingAsync(request, RequestOptions.DEFAULT, listener); // <1> // end::put-role-mapping-execute-async - assertTrue(latch.await(30L, TimeUnit.SECONDS)); + assertThat(future.get(), notNullValue()); + assertThat(future.get().isCreated(), is(false)); } } public void testGetRoleMappings() throws Exception { final RestHighLevelClient client = highLevelClient(); + final TemplateRoleName monitoring = new TemplateRoleName("{\"source\":\"monitoring\"}", TemplateRoleName.Format.STRING); + final TemplateRoleName template = new TemplateRoleName("{\"source\":\"{{username}}\"}", TemplateRoleName.Format.STRING); + final RoleMapperExpression rules1 = AnyRoleMapperExpression.builder().addExpression(FieldRoleMapperExpression.ofUsername("*")) .addExpression(FieldRoleMapperExpression.ofGroups("cn=admins,dc=example,dc=com")).build(); - final PutRoleMappingRequest putRoleMappingRequest1 = new PutRoleMappingRequest("mapping-example-1", true, Collections.singletonList( - "superuser"), rules1, null, RefreshPolicy.NONE); + final PutRoleMappingRequest putRoleMappingRequest1 = new PutRoleMappingRequest("mapping-example-1", true, Collections.emptyList(), + Arrays.asList(monitoring, template), rules1, null, RefreshPolicy.NONE); final PutRoleMappingResponse putRoleMappingResponse1 = client.security().putRoleMapping(putRoleMappingRequest1, RequestOptions.DEFAULT); boolean isCreated1 = putRoleMappingResponse1.isCreated(); @@ -424,8 +433,8 @@ public void testGetRoleMappings() throws Exception { "cn=admins,dc=example,dc=com")).build(); final Map metadata2 = new HashMap<>(); metadata2.put("k1", "v1"); - final PutRoleMappingRequest putRoleMappingRequest2 = new PutRoleMappingRequest("mapping-example-2", true, Collections.singletonList( - "monitoring"), rules2, metadata2, RefreshPolicy.NONE); + final PutRoleMappingRequest putRoleMappingRequest2 = new PutRoleMappingRequest("mapping-example-2", true, + Arrays.asList("superuser"), Collections.emptyList(), rules2, metadata2, RefreshPolicy.NONE); final PutRoleMappingResponse putRoleMappingResponse2 = client.security().putRoleMapping(putRoleMappingRequest2, RequestOptions.DEFAULT); boolean isCreated2 = putRoleMappingResponse2.isCreated(); @@ -445,7 +454,9 @@ public void testGetRoleMappings() throws Exception { assertThat(mappings.get(0).getName(), is("mapping-example-1")); assertThat(mappings.get(0).getExpression(), equalTo(rules1)); assertThat(mappings.get(0).getMetadata(), equalTo(Collections.emptyMap())); - assertThat(mappings.get(0).getRoles(), contains("superuser")); + assertThat(mappings.get(0).getRoles(), iterableWithSize(0)); + assertThat(mappings.get(0).getRoleTemplates(), iterableWithSize(2)); + assertThat(mappings.get(0).getRoleTemplates(), containsInAnyOrder(monitoring, template)); } { @@ -462,11 +473,13 @@ public void testGetRoleMappings() throws Exception { if (roleMapping.getName().equals("mapping-example-1")) { assertThat(roleMapping.getMetadata(), equalTo(Collections.emptyMap())); assertThat(roleMapping.getExpression(), equalTo(rules1)); - assertThat(roleMapping.getRoles(), contains("superuser")); + assertThat(roleMapping.getRoles(), emptyIterable()); + assertThat(roleMapping.getRoleTemplates(), 
contains(monitoring, template)); } else { assertThat(roleMapping.getMetadata(), equalTo(metadata2)); assertThat(roleMapping.getExpression(), equalTo(rules2)); - assertThat(roleMapping.getRoles(), contains("monitoring")); + assertThat(roleMapping.getRoles(), contains("superuser")); + assertThat(roleMapping.getRoleTemplates(), emptyIterable()); } } } @@ -485,11 +498,13 @@ public void testGetRoleMappings() throws Exception { if (roleMapping.getName().equals("mapping-example-1")) { assertThat(roleMapping.getMetadata(), equalTo(Collections.emptyMap())); assertThat(roleMapping.getExpression(), equalTo(rules1)); - assertThat(roleMapping.getRoles(), contains("superuser")); + assertThat(roleMapping.getRoles(), emptyIterable()); + assertThat(roleMapping.getRoleTemplates(), containsInAnyOrder(monitoring, template)); } else { assertThat(roleMapping.getMetadata(), equalTo(metadata2)); assertThat(roleMapping.getExpression(), equalTo(rules2)); - assertThat(roleMapping.getRoles(), contains("monitoring")); + assertThat(roleMapping.getRoles(), contains("superuser")); + assertThat(roleMapping.getRoleTemplates(), emptyIterable()); } } } @@ -1093,8 +1108,8 @@ public void testDeleteRoleMapping() throws Exception { { // Create role mappings final RoleMapperExpression rules = FieldRoleMapperExpression.ofUsername("*"); - final PutRoleMappingRequest request = new PutRoleMappingRequest("mapping-example", true, Collections.singletonList("superuser"), - rules, null, RefreshPolicy.NONE); + final PutRoleMappingRequest request = new PutRoleMappingRequest("mapping-example", true, + Collections.singletonList("superuser"), Collections.emptyList(), rules, null, RefreshPolicy.NONE); final PutRoleMappingResponse response = client.security().putRoleMapping(request, RequestOptions.DEFAULT); boolean isCreated = response.isCreated(); assertTrue(isCreated); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/ExpressionRoleMappingTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/ExpressionRoleMappingTests.java index 29bc7812f5b7e..f30307ebde51a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/ExpressionRoleMappingTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/ExpressionRoleMappingTests.java @@ -31,6 +31,7 @@ import java.util.HashMap; import java.util.Map; +import static java.util.Collections.singletonList; import static org.hamcrest.Matchers.equalTo; public class ExpressionRoleMappingTests extends ESTestCase { @@ -59,48 +60,53 @@ public void usedDeprecatedName(String usedName, String modernName) { public void usedDeprecatedField(String usedName, String replacedWith) { } }, json), "example-role-mapping"); - final ExpressionRoleMapping expectedRoleMapping = new ExpressionRoleMapping("example-role-mapping", FieldRoleMapperExpression - .ofKeyValues("realm.name", "kerb1"), Collections.singletonList("superuser"), null, true); + final ExpressionRoleMapping expectedRoleMapping = new ExpressionRoleMapping("example-role-mapping", + FieldRoleMapperExpression.ofKeyValues("realm.name", "kerb1"), + singletonList("superuser"), Collections.emptyList(), + null, true); assertThat(expressionRoleMapping, equalTo(expectedRoleMapping)); } public void testEqualsHashCode() { - final ExpressionRoleMapping expressionRoleMapping = new ExpressionRoleMapping("kerberosmapping", FieldRoleMapperExpression - .ofKeyValues("realm.name", "kerb1"), Collections.singletonList("superuser"), null, true); - 
EqualsHashCodeTestUtils.checkEqualsAndHashCode(expressionRoleMapping, (original) -> { - return new ExpressionRoleMapping(original.getName(), original.getExpression(), original.getRoles(), original.getMetadata(), - original.isEnabled()); - }); - EqualsHashCodeTestUtils.checkEqualsAndHashCode(expressionRoleMapping, (original) -> { - return new ExpressionRoleMapping(original.getName(), original.getExpression(), original.getRoles(), original.getMetadata(), - original.isEnabled()); - }, ExpressionRoleMappingTests::mutateTestItem); + final ExpressionRoleMapping expressionRoleMapping = new ExpressionRoleMapping("kerberosmapping", + FieldRoleMapperExpression.ofKeyValues("realm.name", "kerb1"), + singletonList("superuser"), Collections.emptyList(), + null, true); + EqualsHashCodeTestUtils.checkEqualsAndHashCode(expressionRoleMapping, original -> + new ExpressionRoleMapping(original.getName(), original.getExpression(), original.getRoles(), original.getRoleTemplates(), + original.getMetadata(), original.isEnabled()), ExpressionRoleMappingTests::mutateTestItem); } - private static ExpressionRoleMapping mutateTestItem(ExpressionRoleMapping original) { + private static ExpressionRoleMapping mutateTestItem(ExpressionRoleMapping original) throws IOException { ExpressionRoleMapping mutated = null; - switch (randomIntBetween(0, 4)) { + switch (randomIntBetween(0, 5)) { case 0: - mutated = new ExpressionRoleMapping("namechanged", FieldRoleMapperExpression.ofKeyValues("realm.name", "kerb1"), Collections - .singletonList("superuser"), null, true); + mutated = new ExpressionRoleMapping("namechanged", FieldRoleMapperExpression.ofKeyValues("realm.name", "kerb1"), + singletonList("superuser"), Collections.emptyList(), null, true); break; case 1: - mutated = new ExpressionRoleMapping("kerberosmapping", FieldRoleMapperExpression.ofKeyValues("changed", "changed"), Collections - .singletonList("superuser"), null, true); + mutated = new ExpressionRoleMapping("kerberosmapping", FieldRoleMapperExpression.ofKeyValues("changed", "changed"), + singletonList("superuser"), Collections.emptyList(), null, true); break; case 2: - mutated = new ExpressionRoleMapping("kerberosmapping", FieldRoleMapperExpression.ofKeyValues("realm.name", "kerb1"), Collections - .singletonList("changed"), null, true); + mutated = new ExpressionRoleMapping("kerberosmapping", FieldRoleMapperExpression.ofKeyValues("realm.name", "kerb1"), + singletonList("changed"), Collections.emptyList(), null, true); break; case 3: Map metadata = new HashMap<>(); metadata.put("a", "b"); - mutated = new ExpressionRoleMapping("kerberosmapping", FieldRoleMapperExpression.ofKeyValues("realm.name", "kerb1"), Collections - .singletonList("superuser"), metadata, true); + mutated = new ExpressionRoleMapping("kerberosmapping", FieldRoleMapperExpression.ofKeyValues("realm.name", "kerb1"), + singletonList("superuser"), Collections.emptyList(), metadata, true); break; case 4: - mutated = new ExpressionRoleMapping("kerberosmapping", FieldRoleMapperExpression.ofKeyValues("realm.name", "kerb1"), Collections - .singletonList("superuser"), null, false); + mutated = new ExpressionRoleMapping("kerberosmapping", FieldRoleMapperExpression.ofKeyValues("realm.name", "kerb1"), + Collections.emptyList(), + singletonList(new TemplateRoleName(Collections.singletonMap("source", "superuser"), TemplateRoleName.Format.STRING)), + null, true); + break; + case 5: + mutated = new ExpressionRoleMapping("kerberosmapping", FieldRoleMapperExpression.ofKeyValues("realm.name", "kerb1"), + 
singletonList("superuser"), Collections.emptyList(), null, false); break; } return mutated; diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetRoleMappingsResponseTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetRoleMappingsResponseTests.java index b612c9ead28a5..20883b859f9ae 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetRoleMappingsResponseTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/GetRoleMappingsResponseTests.java @@ -74,9 +74,10 @@ public void usedDeprecatedField(String usedName, String replacedWith) { }, json)); final List expectedRoleMappingsList = new ArrayList<>(); expectedRoleMappingsList.add(new ExpressionRoleMapping("kerberosmapping", FieldRoleMapperExpression.ofKeyValues("realm.name", - "kerb1"), Collections.singletonList("superuser"), null, true)); + "kerb1"), Collections.singletonList("superuser"), Collections.emptyList(), null, true)); expectedRoleMappingsList.add(new ExpressionRoleMapping("ldapmapping", FieldRoleMapperExpression.ofGroups( - "cn=ipausers,cn=groups,cn=accounts,dc=ipademo,dc=local"), Collections.singletonList("monitoring"), null, false)); + "cn=ipausers,cn=groups,cn=accounts,dc=ipademo,dc=local"), Collections.singletonList("monitoring"), Collections.emptyList(), + null, false)); final GetRoleMappingsResponse expectedResponse = new GetRoleMappingsResponse(expectedRoleMappingsList); assertThat(response, equalTo(expectedResponse)); } @@ -84,7 +85,7 @@ public void usedDeprecatedField(String usedName, String replacedWith) { public void testEqualsHashCode() { final List roleMappingsList = new ArrayList<>(); roleMappingsList.add(new ExpressionRoleMapping("kerberosmapping", FieldRoleMapperExpression.ofKeyValues("realm.name", - "kerb1"), Collections.singletonList("superuser"), null, true)); + "kerb1"), Collections.singletonList("superuser"), Collections.emptyList(), null, true)); final GetRoleMappingsResponse response = new GetRoleMappingsResponse(roleMappingsList); assertNotNull(response); EqualsHashCodeTestUtils.checkEqualsAndHashCode(response, (original) -> { @@ -101,15 +102,16 @@ private static GetRoleMappingsResponse mutateTestItem(GetRoleMappingsResponse or case 0: final List roleMappingsList1 = new ArrayList<>(); roleMappingsList1.add(new ExpressionRoleMapping("ldapmapping", FieldRoleMapperExpression.ofGroups( - "cn=ipausers,cn=groups,cn=accounts,dc=ipademo,dc=local"), Collections.singletonList("monitoring"), null, false)); + "cn=ipausers,cn=groups,cn=accounts,dc=ipademo,dc=local"), Collections.singletonList("monitoring"), Collections.emptyList(), + null, false)); mutated = new GetRoleMappingsResponse(roleMappingsList1); break; case 1: final List roleMappingsList2 = new ArrayList<>(); - ExpressionRoleMapping orginialRoleMapping = original.getMappings().get(0); - roleMappingsList2.add(new ExpressionRoleMapping(orginialRoleMapping.getName(), FieldRoleMapperExpression.ofGroups( - "cn=ipausers,cn=groups,cn=accounts,dc=ipademo,dc=local"), - orginialRoleMapping.getRoles(), orginialRoleMapping.getMetadata(), !orginialRoleMapping.isEnabled())); + ExpressionRoleMapping originalRoleMapping = original.getMappings().get(0); + roleMappingsList2.add(new ExpressionRoleMapping(originalRoleMapping.getName(), + FieldRoleMapperExpression.ofGroups("cn=ipausers,cn=groups,cn=accounts,dc=ipademo,dc=local"), originalRoleMapping.getRoles(), + Collections.emptyList(), originalRoleMapping.getMetadata(), 
!originalRoleMapping.isEnabled())); mutated = new GetRoleMappingsResponse(roleMappingsList2); break; } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/PutRoleMappingRequestTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/PutRoleMappingRequestTests.java index f0a3f7572ef3b..bf5ba34bffc5c 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/security/PutRoleMappingRequestTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/security/PutRoleMappingRequestTests.java @@ -29,12 +29,12 @@ import org.elasticsearch.test.EqualsHashCodeTestUtils; import java.io.IOException; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.stream.Collectors; import static org.hamcrest.Matchers.equalTo; @@ -49,7 +49,8 @@ public void testPutRoleMappingRequest() { metadata.put("k1", "v1"); final RefreshPolicy refreshPolicy = randomFrom(RefreshPolicy.values()); - PutRoleMappingRequest putRoleMappingRequest = new PutRoleMappingRequest(name, enabled, roles, rules, metadata, refreshPolicy); + PutRoleMappingRequest putRoleMappingRequest = new PutRoleMappingRequest(name, enabled, roles, Collections.emptyList(), rules, + metadata, refreshPolicy); assertNotNull(putRoleMappingRequest); assertThat(putRoleMappingRequest.getName(), equalTo(name)); assertThat(putRoleMappingRequest.isEnabled(), equalTo(enabled)); @@ -68,23 +69,39 @@ public void testPutRoleMappingRequestThrowsExceptionForNullOrEmptyName() { metadata.put("k1", "v1"); final RefreshPolicy refreshPolicy = randomFrom(RefreshPolicy.values()); - final IllegalArgumentException ile = expectThrows(IllegalArgumentException.class, () -> new PutRoleMappingRequest(name, enabled, - roles, rules, metadata, refreshPolicy)); + final IllegalArgumentException ile = expectThrows(IllegalArgumentException.class, + () -> new PutRoleMappingRequest(name, enabled, roles, Collections.emptyList(), rules, metadata, refreshPolicy)); assertThat(ile.getMessage(), equalTo("role-mapping name is missing")); } - public void testPutRoleMappingRequestThrowsExceptionForNullOrEmptyRoles() { + public void testPutRoleMappingRequestThrowsExceptionForNullRoles() { final String name = randomAlphaOfLength(5); final boolean enabled = randomBoolean(); - final List roles = randomBoolean() ? 
null : Collections.emptyList(); + final List roles = null ; + final List roleTemplates = Collections.emptyList(); final RoleMapperExpression rules = FieldRoleMapperExpression.ofUsername("user"); final Map metadata = new HashMap<>(); metadata.put("k1", "v1"); final RefreshPolicy refreshPolicy = randomFrom(RefreshPolicy.values()); - final IllegalArgumentException ile = expectThrows(IllegalArgumentException.class, () -> new PutRoleMappingRequest(name, enabled, - roles, rules, metadata, refreshPolicy)); - assertThat(ile.getMessage(), equalTo("role-mapping roles are missing")); + final RuntimeException ex = expectThrows(RuntimeException.class, + () -> new PutRoleMappingRequest(name, enabled, roles, roleTemplates, rules, metadata, refreshPolicy)); + assertThat(ex.getMessage(), equalTo("role-mapping roles cannot be null")); + } + + public void testPutRoleMappingRequestThrowsExceptionForEmptyRoles() { + final String name = randomAlphaOfLength(5); + final boolean enabled = randomBoolean(); + final List roles = Collections.emptyList(); + final List roleTemplates = Collections.emptyList(); + final RoleMapperExpression rules = FieldRoleMapperExpression.ofUsername("user"); + final Map metadata = new HashMap<>(); + metadata.put("k1", "v1"); + final RefreshPolicy refreshPolicy = randomFrom(RefreshPolicy.values()); + + final RuntimeException ex = expectThrows(RuntimeException.class, + () -> new PutRoleMappingRequest(name, enabled, roles, roleTemplates, rules, metadata, refreshPolicy)); + assertThat(ex.getMessage(), equalTo("in a role-mapping, one of roles or role_templates is required")); } public void testPutRoleMappingRequestThrowsExceptionForNullRules() { @@ -96,7 +113,8 @@ public void testPutRoleMappingRequestThrowsExceptionForNullRules() { metadata.put("k1", "v1"); final RefreshPolicy refreshPolicy = randomFrom(RefreshPolicy.values()); - expectThrows(NullPointerException.class, () -> new PutRoleMappingRequest(name, enabled, roles, rules, metadata, refreshPolicy)); + expectThrows(NullPointerException.class, () -> new PutRoleMappingRequest(name, enabled, roles, Collections.emptyList(), rules, + metadata, refreshPolicy)); } public void testPutRoleMappingRequestToXContent() throws IOException { @@ -108,7 +126,8 @@ public void testPutRoleMappingRequestToXContent() throws IOException { metadata.put("k1", "v1"); final RefreshPolicy refreshPolicy = randomFrom(RefreshPolicy.values()); - final PutRoleMappingRequest putRoleMappingRequest = new PutRoleMappingRequest(name, enabled, roles, rules, metadata, refreshPolicy); + final PutRoleMappingRequest putRoleMappingRequest = new PutRoleMappingRequest(name, enabled, roles, Collections.emptyList(), rules, + metadata, refreshPolicy); final XContentBuilder builder = XContentFactory.jsonBuilder(); putRoleMappingRequest.toXContent(builder, ToXContent.EMPTY_PARAMS); @@ -117,6 +136,42 @@ public void testPutRoleMappingRequestToXContent() throws IOException { "{"+ "\"enabled\":" + enabled + "," + "\"roles\":[\"superuser\"]," + + "\"role_templates\":[]," + + "\"rules\":{" + + "\"field\":{\"username\":[\"user\"]}" + + "}," + + "\"metadata\":{\"k1\":\"v1\"}" + + "}"; + + assertThat(output, equalTo(expected)); + } + + public void testPutRoleMappingRequestWithTemplateToXContent() throws IOException { + final String name = randomAlphaOfLength(5); + final boolean enabled = randomBoolean(); + final List templates = Arrays.asList( + new TemplateRoleName(Collections.singletonMap("source" , "_realm_{{realm.name}}"), TemplateRoleName.Format.STRING), + new 
TemplateRoleName(Collections.singletonMap("source" , "some_role"), TemplateRoleName.Format.STRING) + ); + final RoleMapperExpression rules = FieldRoleMapperExpression.ofUsername("user"); + final Map metadata = new HashMap<>(); + metadata.put("k1", "v1"); + final RefreshPolicy refreshPolicy = randomFrom(RefreshPolicy.values()); + + final PutRoleMappingRequest putRoleMappingRequest = new PutRoleMappingRequest(name, enabled, Collections.emptyList(), templates, + rules, metadata, refreshPolicy); + + final XContentBuilder builder = XContentFactory.jsonBuilder(); + putRoleMappingRequest.toXContent(builder, ToXContent.EMPTY_PARAMS); + final String output = Strings.toString(builder); + final String expected = + "{"+ + "\"enabled\":" + enabled + "," + + "\"roles\":[]," + + "\"role_templates\":[" + + "{\"template\":\"{\\\"source\\\":\\\"_realm_{{realm.name}}\\\"}\",\"format\":\"string\"}," + + "{\"template\":\"{\\\"source\\\":\\\"some_role\\\"}\",\"format\":\"string\"}" + + "]," + "\"rules\":{" + "\"field\":{\"username\":[\"user\"]}" + "}," + @@ -129,48 +184,59 @@ public void testPutRoleMappingRequestToXContent() throws IOException { public void testEqualsHashCode() { final String name = randomAlphaOfLength(5); final boolean enabled = randomBoolean(); - final List roles = Collections.singletonList("superuser"); + final List roles; + final List templates; + if (randomBoolean()) { + roles = Arrays.asList(randomArray(1, 3, String[]::new, () -> randomAlphaOfLengthBetween(6, 12))); + templates = Collections.emptyList(); + } else { + roles = Collections.emptyList(); + templates = Arrays.asList( + randomArray(1, 3, TemplateRoleName[]::new, + () -> new TemplateRoleName(randomAlphaOfLengthBetween(12, 60), randomFrom(TemplateRoleName.Format.values())) + )); + } final RoleMapperExpression rules = FieldRoleMapperExpression.ofUsername("user"); final Map metadata = new HashMap<>(); metadata.put("k1", "v1"); final RefreshPolicy refreshPolicy = randomFrom(RefreshPolicy.values()); - PutRoleMappingRequest putRoleMappingRequest = new PutRoleMappingRequest(name, enabled, roles, rules, metadata, refreshPolicy); + PutRoleMappingRequest putRoleMappingRequest = new PutRoleMappingRequest(name, enabled, roles, templates, rules, metadata, + refreshPolicy); assertNotNull(putRoleMappingRequest); EqualsHashCodeTestUtils.checkEqualsAndHashCode(putRoleMappingRequest, (original) -> { - return new PutRoleMappingRequest(original.getName(), original.isEnabled(), original.getRoles(), original.getRules(), original - .getMetadata(), original.getRefreshPolicy()); - }); - EqualsHashCodeTestUtils.checkEqualsAndHashCode(putRoleMappingRequest, (original) -> { - return new PutRoleMappingRequest(original.getName(), original.isEnabled(), original.getRoles(), original.getRules(), original - .getMetadata(), original.getRefreshPolicy()); + return new PutRoleMappingRequest(original.getName(), original.isEnabled(), original.getRoles(), original.getRoleTemplates(), + original.getRules(), original.getMetadata(), original.getRefreshPolicy()); }, PutRoleMappingRequestTests::mutateTestItem); } private static PutRoleMappingRequest mutateTestItem(PutRoleMappingRequest original) { - switch (randomIntBetween(0, 4)) { + switch (randomIntBetween(0, 5)) { case 0: - return new PutRoleMappingRequest(randomAlphaOfLength(5), original.isEnabled(), original.getRoles(), original.getRules(), - original.getMetadata(), original.getRefreshPolicy()); + return new PutRoleMappingRequest(randomAlphaOfLength(5), original.isEnabled(), original.getRoles(), + 
original.getRoleTemplates(), original.getRules(), original.getMetadata(), original.getRefreshPolicy()); case 1: - return new PutRoleMappingRequest(original.getName(), !original.isEnabled(), original.getRoles(), original.getRules(), - original.getMetadata(), original.getRefreshPolicy()); + return new PutRoleMappingRequest(original.getName(), !original.isEnabled(), original.getRoles(), original.getRoleTemplates(), + original.getRules(), original.getMetadata(), original.getRefreshPolicy()); case 2: - return new PutRoleMappingRequest(original.getName(), original.isEnabled(), original.getRoles(), + return new PutRoleMappingRequest(original.getName(), original.isEnabled(), original.getRoles(), original.getRoleTemplates(), FieldRoleMapperExpression.ofGroups("group"), original.getMetadata(), original.getRefreshPolicy()); case 3: - return new PutRoleMappingRequest(original.getName(), original.isEnabled(), original.getRoles(), original.getRules(), - Collections.emptyMap(), original.getRefreshPolicy()); + return new PutRoleMappingRequest(original.getName(), original.isEnabled(), original.getRoles(), original.getRoleTemplates(), + original.getRules(), Collections.emptyMap(), original.getRefreshPolicy()); case 4: - List values = Arrays.stream(RefreshPolicy.values()) - .filter(rp -> rp != original.getRefreshPolicy()) - .collect(Collectors.toList()); - return new PutRoleMappingRequest(original.getName(), original.isEnabled(), original.getRoles(), original.getRules(), original - .getMetadata(), randomFrom(values)); + return new PutRoleMappingRequest(original.getName(), original.isEnabled(), original.getRoles(), original.getRoleTemplates(), + original.getRules(), original.getMetadata(), + randomValueOtherThan(original.getRefreshPolicy(), () -> randomFrom(RefreshPolicy.values()))); + case 5: + List roles = new ArrayList<>(original.getRoles()); + roles.add(randomAlphaOfLengthBetween(3, 5)); + return new PutRoleMappingRequest(original.getName(), original.isEnabled(), roles, Collections.emptyList(), + original.getRules(), original.getMetadata(), original.getRefreshPolicy()); + default: - return new PutRoleMappingRequest(randomAlphaOfLength(5), original.isEnabled(), original.getRoles(), original.getRules(), - original.getMetadata(), original.getRefreshPolicy()); + throw new IllegalStateException("Bad random value"); } } diff --git a/docs/java-rest/high-level/dataframe/get_data_frame.asciidoc b/docs/java-rest/high-level/dataframe/get_data_frame.asciidoc new file mode 100644 index 0000000000000..41fa841060b30 --- /dev/null +++ b/docs/java-rest/high-level/dataframe/get_data_frame.asciidoc @@ -0,0 +1,45 @@ +-- +:api: get-data-frame-transform +:request: GetDataFrameTransformRequest +:response: GetDataFrameTransformResponse +-- +[id="{upid}-{api}"] +=== Get Data Frame Transform API + +The Get Data Frame Transform API is used get one or more {dataframe-transform}. +The API accepts a +{request}+ object and returns a +{response}+. + +[id="{upid}-{api}-request"] +==== Get Data Frame Request + +A +{request}+ requires either a data frame transform id, a comma separated list of ids or +the special wildcard `_all` to get all {dataframe-transform}s + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request] +-------------------------------------------------- +<1> Constructing a new GET request referencing an existing {dataframe-transform} + +==== Optional Arguments + +The following arguments are optional. 
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-request-options] +-------------------------------------------------- +<1> Page {dataframe-transform}s starting from this value +<2> Return at most `size` {dataframe-transform}s + +include::../execution.asciidoc[] + +[id="{upid}-{api}-response"] +==== Response + +The returned +{response}+ contains the requested {dataframe-transform}s. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests-file}[{api}-response] +-------------------------------------------------- \ No newline at end of file diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index d077ad2fcf844..4e28efc2941db 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -554,6 +554,7 @@ include::ilm/remove_lifecycle_policy_from_index.asciidoc[] The Java High Level REST Client supports the following Data Frame APIs: +* <<{upid}-get-data-frame-transform>> * <<{upid}-get-data-frame-transform-stats>> * <<{upid}-put-data-frame-transform>> * <<{upid}-delete-data-frame-transform>> @@ -561,6 +562,7 @@ The Java High Level REST Client supports the following Data Frame APIs: * <<{upid}-start-data-frame-transform>> * <<{upid}-stop-data-frame-transform>> +include::dataframe/get_data_frame.asciidoc[] include::dataframe/get_data_frame_stats.asciidoc[] include::dataframe/put_data_frame.asciidoc[] include::dataframe/delete_data_frame.asciidoc[] diff --git a/docs/plugins/repository-azure.asciidoc b/docs/plugins/repository-azure.asciidoc index 9249efd5d1744..61dcadd6e10d6 100644 --- a/docs/plugins/repository-azure.asciidoc +++ b/docs/plugins/repository-azure.asciidoc @@ -126,9 +126,7 @@ The Azure repository supports following settings: setting doesn't affect index files that are already compressed by default. Defaults to `true`. -`readonly`:: - - Makes repository read-only. Defaults to `false`. +include::repository-shared-settings.asciidoc[] `location_mode`:: diff --git a/docs/plugins/repository-gcs.asciidoc b/docs/plugins/repository-gcs.asciidoc index f655d29307074..b34c9456a9306 100644 --- a/docs/plugins/repository-gcs.asciidoc +++ b/docs/plugins/repository-gcs.asciidoc @@ -240,6 +240,8 @@ The following settings are supported: setting doesn't affect index files that are already compressed by default. Defaults to `true`. +include::repository-shared-settings.asciidoc[] + `application_name`:: deprecated[7.0.0, This setting is now defined in the <>] diff --git a/docs/plugins/repository-hdfs.asciidoc b/docs/plugins/repository-hdfs.asciidoc index e798682a38699..bedb0e7e1ef87 100644 --- a/docs/plugins/repository-hdfs.asciidoc +++ b/docs/plugins/repository-hdfs.asciidoc @@ -64,6 +64,8 @@ The following settings are supported: Whether to compress the metadata or not. (Enabled by default) +include::repository-shared-settings.asciidoc[] + `chunk_size`:: Override the chunk size. (Disabled by default) diff --git a/docs/plugins/repository-s3.asciidoc b/docs/plugins/repository-s3.asciidoc index 7c4e763a3b04a..084d67f236472 100644 --- a/docs/plugins/repository-s3.asciidoc +++ b/docs/plugins/repository-s3.asciidoc @@ -213,6 +213,8 @@ The following settings are supported: setting doesn't affect index files that are already compressed by default. Defaults to `true`. 
+include::repository-shared-settings.asciidoc[] + `server_side_encryption`:: When set to `true` files are encrypted on server side using AES256 diff --git a/docs/plugins/repository-shared-settings.asciidoc b/docs/plugins/repository-shared-settings.asciidoc new file mode 100644 index 0000000000000..ca9345e0ffc2c --- /dev/null +++ b/docs/plugins/repository-shared-settings.asciidoc @@ -0,0 +1,11 @@ +`max_restore_bytes_per_sec`:: + + Throttles per node restore rate. Defaults to `40mb` per second. + +`max_snapshot_bytes_per_sec`:: + + Throttles per node snapshot rate. Defaults to `40mb` per second. + +`readonly`:: + + Makes repository read-only. Defaults to `false`. \ No newline at end of file diff --git a/docs/reference/commands/certutil.asciidoc b/docs/reference/commands/certutil.asciidoc index 06e9dc53bd9b6..6f4d3224d7aeb 100644 --- a/docs/reference/commands/certutil.asciidoc +++ b/docs/reference/commands/certutil.asciidoc @@ -177,14 +177,17 @@ with the `ca` parameter. `--pass `:: Specifies the password for the generated private keys. + -Keys stored in PKCS#12 format are always password protected. +Keys stored in PKCS#12 format are always password protected, however, +this password may be _blank_. If you want to specify a blank password +without a prompt, use `--pass ""` (with no `=`) on the command line. + Keys stored in PEM format are password protected only if the `--pass` parameter is specified. If you do not supply an argument for the `--pass` parameter, you are prompted for a password. -+ -If you want to specify a _blank_ password (without prompting), use -`--pass ""` (with no `=`). +Encrypted PEM files do not support blank passwords (if you do not +wish to password-protect your PEM keys, then do not specify +`--pass`). + `--pem`:: Generates certificates and keys in PEM format instead of PKCS#12. This parameter cannot be used with the `csr` parameter. 
diff --git a/docs/reference/images/sql/client-apps/dbeaver-1-new-conn.png b/docs/reference/images/sql/client-apps/dbeaver-1-new-conn.png index 2307f03932663..bf7f1c63135af 100644 Binary files a/docs/reference/images/sql/client-apps/dbeaver-1-new-conn.png and b/docs/reference/images/sql/client-apps/dbeaver-1-new-conn.png differ diff --git a/docs/reference/images/sql/client-apps/dbeaver-2-conn-es.png b/docs/reference/images/sql/client-apps/dbeaver-2-conn-es.png index 1ca209a57e555..f63df0987c167 100644 Binary files a/docs/reference/images/sql/client-apps/dbeaver-2-conn-es.png and b/docs/reference/images/sql/client-apps/dbeaver-2-conn-es.png differ diff --git a/docs/reference/images/sql/client-apps/dbeaver-3-conn-props.png b/docs/reference/images/sql/client-apps/dbeaver-3-conn-props.png index 7561e94bdd991..825ce1b6357fb 100644 Binary files a/docs/reference/images/sql/client-apps/dbeaver-3-conn-props.png and b/docs/reference/images/sql/client-apps/dbeaver-3-conn-props.png differ diff --git a/docs/reference/images/sql/client-apps/dbeaver-4-driver-ver.png b/docs/reference/images/sql/client-apps/dbeaver-4-driver-ver.png index 62cef87a7ae9d..bcad2a75d801e 100644 Binary files a/docs/reference/images/sql/client-apps/dbeaver-4-driver-ver.png and b/docs/reference/images/sql/client-apps/dbeaver-4-driver-ver.png differ diff --git a/docs/reference/images/sql/client-apps/dbeaver-5-test-conn.png b/docs/reference/images/sql/client-apps/dbeaver-5-test-conn.png index 70f2a1dd4dc2f..c76ae19937a08 100644 Binary files a/docs/reference/images/sql/client-apps/dbeaver-5-test-conn.png and b/docs/reference/images/sql/client-apps/dbeaver-5-test-conn.png differ diff --git a/docs/reference/images/sql/client-apps/dbeaver-6-data.png b/docs/reference/images/sql/client-apps/dbeaver-6-data.png index 5d33441fe3b8c..053042b791116 100644 Binary files a/docs/reference/images/sql/client-apps/dbeaver-6-data.png and b/docs/reference/images/sql/client-apps/dbeaver-6-data.png differ diff --git a/docs/reference/images/sql/client-apps/dbvis-1-driver-manager.png b/docs/reference/images/sql/client-apps/dbvis-1-driver-manager.png index b0ff89cc9d75a..cde4d9cc7cf26 100644 Binary files a/docs/reference/images/sql/client-apps/dbvis-1-driver-manager.png and b/docs/reference/images/sql/client-apps/dbvis-1-driver-manager.png differ diff --git a/docs/reference/images/sql/client-apps/dbvis-2-driver.png b/docs/reference/images/sql/client-apps/dbvis-2-driver.png index b0f3a2927c968..cae3824547bc3 100644 Binary files a/docs/reference/images/sql/client-apps/dbvis-2-driver.png and b/docs/reference/images/sql/client-apps/dbvis-2-driver.png differ diff --git a/docs/reference/images/sql/client-apps/dbvis-3-new-conn.png b/docs/reference/images/sql/client-apps/dbvis-3-new-conn.png index 7f89cf84a8e62..332895a2c8a8b 100644 Binary files a/docs/reference/images/sql/client-apps/dbvis-3-new-conn.png and b/docs/reference/images/sql/client-apps/dbvis-3-new-conn.png differ diff --git a/docs/reference/images/sql/client-apps/dbvis-4-conn-props.png b/docs/reference/images/sql/client-apps/dbvis-4-conn-props.png index 2027949c401a7..d854dc826b1e1 100644 Binary files a/docs/reference/images/sql/client-apps/dbvis-4-conn-props.png and b/docs/reference/images/sql/client-apps/dbvis-4-conn-props.png differ diff --git a/docs/reference/images/sql/client-apps/dbvis-5-data.png b/docs/reference/images/sql/client-apps/dbvis-5-data.png index fb5ce8b86aa74..c67336568edc0 100644 Binary files a/docs/reference/images/sql/client-apps/dbvis-5-data.png and 
b/docs/reference/images/sql/client-apps/dbvis-5-data.png differ diff --git a/docs/reference/images/sql/client-apps/squirell-3-add-driver.png b/docs/reference/images/sql/client-apps/squirell-3-add-driver.png index 9a9c2c2634e3c..29f06b7033d72 100644 Binary files a/docs/reference/images/sql/client-apps/squirell-3-add-driver.png and b/docs/reference/images/sql/client-apps/squirell-3-add-driver.png differ diff --git a/docs/reference/images/sql/client-apps/squirell-4-driver-list.png b/docs/reference/images/sql/client-apps/squirell-4-driver-list.png index 35f389747c970..a269e29d672ea 100644 Binary files a/docs/reference/images/sql/client-apps/squirell-4-driver-list.png and b/docs/reference/images/sql/client-apps/squirell-4-driver-list.png differ diff --git a/docs/reference/images/sql/client-apps/squirell-5-add-alias.png b/docs/reference/images/sql/client-apps/squirell-5-add-alias.png index d5587060d2eaa..1fc8e9ad60191 100644 Binary files a/docs/reference/images/sql/client-apps/squirell-5-add-alias.png and b/docs/reference/images/sql/client-apps/squirell-5-add-alias.png differ diff --git a/docs/reference/images/sql/client-apps/squirell-7-data.png b/docs/reference/images/sql/client-apps/squirell-7-data.png index 760ade7c670fb..70837963b74b5 100644 Binary files a/docs/reference/images/sql/client-apps/squirell-7-data.png and b/docs/reference/images/sql/client-apps/squirell-7-data.png differ diff --git a/docs/reference/images/sql/client-apps/workbench-2-add-driver.png b/docs/reference/images/sql/client-apps/workbench-2-add-driver.png index 03e740f400ae1..659cfd0c40760 100644 Binary files a/docs/reference/images/sql/client-apps/workbench-2-add-driver.png and b/docs/reference/images/sql/client-apps/workbench-2-add-driver.png differ diff --git a/docs/reference/images/sql/client-apps/workbench-3-connection.png b/docs/reference/images/sql/client-apps/workbench-3-connection.png index 32643375e3de9..9262ef0f533a2 100644 Binary files a/docs/reference/images/sql/client-apps/workbench-3-connection.png and b/docs/reference/images/sql/client-apps/workbench-3-connection.png differ diff --git a/docs/reference/images/sql/client-apps/workbench-4-data.png b/docs/reference/images/sql/client-apps/workbench-4-data.png index 602f09d06e46f..7b8251fc9588a 100644 Binary files a/docs/reference/images/sql/client-apps/workbench-4-data.png and b/docs/reference/images/sql/client-apps/workbench-4-data.png differ diff --git a/docs/reference/index-modules/similarity.asciidoc b/docs/reference/index-modules/similarity.asciidoc index 014923d463cbd..ee6cf3958375b 100644 --- a/docs/reference/index-modules/similarity.asciidoc +++ b/docs/reference/index-modules/similarity.asciidoc @@ -92,22 +92,14 @@ from randomness] framework. This similarity has the following options: [horizontal] `basic_model`:: - Possible values: {lucene-core-javadoc}/org/apache/lucene/search/similarities/BasicModelG.html[`be`], - {lucene-core-javadoc}/org/apache/lucene/search/similarities/BasicModelD.html[`d`], - {lucene-core-javadoc}/org/apache/lucene/search/similarities/BasicModelG.html[`g`], + Possible values: {lucene-core-javadoc}/org/apache/lucene/search/similarities/BasicModelG.html[`g`], {lucene-core-javadoc}/org/apache/lucene/search/similarities/BasicModelIF.html[`if`], - {lucene-core-javadoc}/org/apache/lucene/search/similarities/BasicModelIn.html[`in`], - {lucene-core-javadoc}/org/apache/lucene/search/similarities/BasicModelIne.html[`ine`] and - {lucene-core-javadoc}/org/apache/lucene/search/similarities/BasicModelP.html[`p`]. 
- -`be`, `d` and `p` should be avoided in practice as they might return scores that -are equal to 0 or infinite with terms that do not meet the expected random -distribution. + {lucene-core-javadoc}/org/apache/lucene/search/similarities/BasicModelIn.html[`in`] and + {lucene-core-javadoc}/org/apache/lucene/search/similarities/BasicModelIne.html[`ine`]. `after_effect`:: - Possible values: {lucene-core-javadoc}/org/apache/lucene/search/similarities/AfterEffect.NoAfterEffect.html[`no`], - {lucene-core-javadoc}/org/apache/lucene/search/similarities/AfterEffectB.html[`b`] and - {lucene-core-javadoc}/org/apache/lucene/search/similarities/AfterEffectL.html[`l`]. + Possible values: {lucene-core-javadoc}/org/apache/lucene/search/similarities/AfterEffectB.html[`b`] and + {lucene-core-javadoc}/org/apache/lucene/search/similarities/AfterEffectB.html[`l`]. `normalization`:: Possible values: {lucene-core-javadoc}/org/apache/lucene/search/similarities/Normalization.NoNormalization.html[`no`], diff --git a/docs/reference/mapping/types.asciidoc b/docs/reference/mapping/types.asciidoc index 76b832a529fb4..c0db156dc3a1c 100644 --- a/docs/reference/mapping/types.asciidoc +++ b/docs/reference/mapping/types.asciidoc @@ -52,6 +52,7 @@ string:: <> and <> <>:: Record sparse vectors of float values. +<>:: A text-like field optimized for queries to implement as-you-type completion [float] === Multi-fields @@ -110,3 +111,5 @@ include::types/rank-features.asciidoc[] include::types/dense-vector.asciidoc[] include::types/sparse-vector.asciidoc[] + +include::types/search-as-you-type.asciidoc[] diff --git a/docs/reference/mapping/types/search-as-you-type.asciidoc b/docs/reference/mapping/types/search-as-you-type.asciidoc new file mode 100644 index 0000000000000..aec21f2e3ca6c --- /dev/null +++ b/docs/reference/mapping/types/search-as-you-type.asciidoc @@ -0,0 +1,258 @@ +[[search-as-you-type]] +=== Search as you type datatype + +experimental[] + +The `search_as_you_type` field type is a text-like field that is optimized to +provide out-of-the-box support for queries that serve an as-you-type completion +use case. It creates a series of subfields that are analyzed to index terms +that can be efficiently matched by a query that partially matches the entire +indexed text value. Both prefix completion (i.e matching terms starting at the +beginning of the input) and infix completion (i.e. matching terms at any +position within the input) are supported. + +When adding a field of this type to a mapping + +[source,js] +-------------------------------------------------- +PUT my_index +{ + "mappings": { + "properties": { + "my_field": { + "type": "search_as_you_type" + } + } + } +} +-------------------------------------------------- +// CONSOLE + +This creates the following fields + +[horizontal] + +`my_field`:: + + Analyzed as configured in the mapping. If an analyzer is not configured, + the default analyzer for the index is used + +`my_field._2gram`:: + + Wraps the analyzer of `my_field` with a shingle token filter of shingle + size 2 + +`my_field._3gram`:: + + Wraps the analyzer of `my_field` with a shingle token filter of shingle + size 3 + +`my_field._index_prefix`:: + + Wraps the analyzer of `my_field._3gram` with an edge ngram token filter + + +The size of shingles in subfields can be configured with the `max_shingle_size` +mapping parameter. The default is 3, and valid values for this parameter are +integer values 2 - 4 inclusive. 
Shingle subfields will be created for each +shingle size from 2 up to and including the `max_shingle_size`. The +`my_field._index_prefix` subfield will always use the analyzer from the shingle +subfield with the `max_shingle_size` when constructing its own analyzer. + +Increasing the `max_shingle_size` will improve matches for queries with more +consecutive terms, at the cost of larger index size. The default +`max_shingle_size` should usually be sufficient. + +The same input text is indexed into each of these fields automatically, with +their differing analysis chains, when an indexed document has a value for the +root field `my_field`. + +[source,js] +-------------------------------------------------- +PUT my_index/_doc/1?refresh +{ + "my_field": "quick brown fox jump lazy dog" +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +The most efficient way of querying to serve a search-as-you-type use case is +usually a <> query of type +<> that targets the root +`search_as_you_type` field and its shingle subfields. This can match the query +terms in any order, but will score documents higher if they contain the terms +in order in a shingle subfield. + +[source,js] +-------------------------------------------------- +GET my_index/_search +{ + "query": { + "multi_match": { + "query": "brown f", + "type": "bool_prefix", + "fields": [ + "my_field", + "my_field._2gram", + "my_field._3gram" + ] + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +[source,js] +-------------------------------------------------- +{ + "took" : 44, + "timed_out" : false, + "_shards" : { + "total" : 1, + "successful" : 1, + "skipped" : 0, + "failed" : 0 + }, + "hits" : { + "total" : { + "value" : 1, + "relation" : "eq" + }, + "max_score" : 0.8630463, + "hits" : [ + { + "_index" : "my_index", + "_type" : "_doc", + "_id" : "1", + "_score" : 0.8630463, + "_source" : { + "my_field" : "quick brown fox jump lazy dog" + } + } + ] + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"took" : 44/"took" : $body.took/] +// TESTRESPONSE[s/"max_score" : 0.8630463/"max_score" : $body.hits.max_score/] +// TESTRESPONSE[s/"_score" : 0.8630463/"_score" : $body.hits.hits.0._score/] + +To search for documents that strictly match the query terms in order, or to +search using other properties of phrase queries, use a +<> on the root +field. A <> can also be used +if the last term should be matched exactly, and not as a prefix. Using phrase +queries may be less efficient than using the `match_bool_prefix` query. + +[source,js] +-------------------------------------------------- +GET my_index/_search +{ + "query": { + "match_phrase_prefix": { + "my_field": "brown f" + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[continued] + +[[specific-params]] +==== Parameters specific to the `search_as_you_type` field + +The following parameters are accepted in a mapping for the `search_as_you_type` +field and are specific to this field type + +[horizontal] + +`max_shingle_size`:: + + The largest shingle size to index the input with and create subfields for, + creating one subfield for each shingle size between 2 and + `max_shingle_size`. Accepts integer values between 2 and 4 inclusive. This + option defaults to 3. 
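For illustration only (this sketch is not part of the original change), a mapping that raises the shingle size to its maximum allowed value could look as follows, reusing the `my_index` and `my_field` names from the examples above.

[source,js]
--------------------------------------------------
PUT my_index
{
  "mappings": {
    "properties": {
      "my_field": {
        "type": "search_as_you_type",
        "max_shingle_size": 4 <1>
      }
    }
  }
}
--------------------------------------------------
// CONSOLE
<1> Creates the `_2gram`, `_3gram` and `_4gram` shingle subfields in addition
to the root `my_field` and the `my_field._index_prefix` subfield.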
+ + +[[general-params]] +==== Parameters of the field type as a text field + +The following parameters are accepted in a mapping for the `search_as_you_type` +field due to its nature as a text-like field, and behave similarly to their +behavior when configuring a field of the <> datatype. Unless +otherwise noted, these options configure the root fields subfields in +the same way. + +<>:: + + The <> which should be used for + <> string fields, both at index-time and at + search-time (unless overridden by the + <>). Defaults to the default index + analyzer, or the <>. + +<>:: + + Should the field be searchable? Accepts `true` (default) or `false`. + +<>:: + + What information should be stored in the index, for search and highlighting + purposes. Defaults to `positions`. + +<>:: + + Whether field-length should be taken into account when scoring queries. + Accepts `true` or `false`. This option configures the root field + and shingle subfields, where its default is `true`. It does not configure + the prefix subfield, where it it `false`. + +<>:: + + Whether the field value should be stored and retrievable separately from + the <> field. Accepts `true` or `false` + (default). This option only configures the root field, and does not + configure any subfields. + +<>:: + + The <> that should be used at search time on + <> fields. Defaults to the `analyzer` setting. + +<>:: + + The <> that should be used at search time when a + phrase is encountered. Defaults to the `search_analyzer` setting. + +<>:: + + Which scoring algorithm or _similarity_ should be used. Defaults + to `BM25`. + +<>:: + + Whether term vectors should be stored for an <> + field. Defaults to `no`. This option configures the root field and shingle + subfields, but not the prefix subfield. + + +[[prefix-queries]] +==== Optimization of prefix queries + +When making a <> query to the root field or +any of its subfields, the query will be rewritten to a +<> query on the `._index_prefix` subfield. This +matches more efficiently than is typical of `prefix` queries on text fields, +as prefixes up to a certain length of each shingle are indexed directly as +terms in the `._index_prefix` subfield. + +The analyzer of the `._index_prefix` subfield slightly modifies the +shingle-building behavior to also index prefixes of the terms at the end of the +field's value that normally would not be produced as shingles. For example, if +the value `quick brown fox` is indexed into a `search_as_you_type` field with +`max_shingle_size` of 3, prefixes for `brown fox` and `fox` are also indexed +into the `._index_prefix` subfield even though they do not appear as terms in +the `._3gram` subfield. This allows for completion of all the terms in the +field's input. 
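As a purely illustrative sketch (not part of the original text), a plain `prefix` query that benefits from this rewrite could look as follows, again reusing the `my_index` and `my_field` names from the earlier examples.

[source,js]
--------------------------------------------------
GET my_index/_search
{
  "query": {
    "prefix": {
      "my_field": "bro" <1>
    }
  }
}
--------------------------------------------------
// CONSOLE
<1> Per the behavior described above, this is rewritten to a `term` query on
`my_field._index_prefix` rather than being expanded as a multi-term `prefix`
query.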
diff --git a/docs/reference/migration/migrate_8_0.asciidoc b/docs/reference/migration/migrate_8_0.asciidoc index 789a2d5fa0ce4..0c695a3b2bb47 100644 --- a/docs/reference/migration/migrate_8_0.asciidoc +++ b/docs/reference/migration/migrate_8_0.asciidoc @@ -15,6 +15,7 @@ coming[8.0.0] * <> * <> * <> +* <> //NOTE: The notable-breaking-changes tagged regions are re-used in the //Installation and Upgrade Guide @@ -41,3 +42,4 @@ include::migrate_8_0/analysis.asciidoc[] include::migrate_8_0/discovery.asciidoc[] include::migrate_8_0/mappings.asciidoc[] include::migrate_8_0/snapshots.asciidoc[] +include::migrate_8_0/security.asciidoc[] diff --git a/docs/reference/migration/migrate_8_0/security.asciidoc b/docs/reference/migration/migrate_8_0/security.asciidoc new file mode 100644 index 0000000000000..e09d21764f740 --- /dev/null +++ b/docs/reference/migration/migrate_8_0/security.asciidoc @@ -0,0 +1,18 @@ +[float] +[[breaking_80_security_changes]] +=== Security changes + +[float] +==== The `accept_default_password` setting has been removed + +The `xpack.security.authc.accept_default_password` setting has not had any affect +since the 6.0 release of {es}. It has been removed and cannot be used. + +[float] +==== The `roles.index.cache.*` settings have been removed + +The `xpack.security.authz.store.roles.index.cache.max_size` and +`xpack.security.authz.store.roles.index.cache.ttl` settings have +been removed. These settings have been redundant and deprecated +since the 5.2 release of {es}. + diff --git a/docs/reference/modules/remote-clusters.asciidoc b/docs/reference/modules/remote-clusters.asciidoc index b134a626739fa..1c51cda907cf1 100644 --- a/docs/reference/modules/remote-clusters.asciidoc +++ b/docs/reference/modules/remote-clusters.asciidoc @@ -227,7 +227,7 @@ PUT _cluster/settings clusters are kept alive. If set to `-1`, application-level ping messages to this remote cluster are not sent. If unset, application-level ping messages are sent according to the global `transport.ping_schedule` setting, which - defaults to ``-1` meaning that pings are not sent. + defaults to `-1` meaning that pings are not sent. `cluster.remote.${cluster_alias}.transport.compress`:: @@ -237,6 +237,14 @@ PUT _cluster/settings Elasticsearch compresses the response. If unset, the global `transport.compress` is used as the fallback setting. +`cluster.remote.${cluster_alias}.proxy`:: + + Sets a proxy address for the specified remote cluster. By default this is not + set, meaning that Elasticsearch will connect directly to the nodes in the + remote cluster using their <>. + If this setting is set to an IP address or hostname then Elasticsearch will + connect to the nodes in the remote cluster using this address instead. + [float] [[retrieve-remote-clusters-info]] === Retrieving remote clusters info diff --git a/docs/reference/query-dsl/full-text-queries.asciidoc b/docs/reference/query-dsl/full-text-queries.asciidoc index 5fb5447dbb79a..0af99b61f194f 100644 --- a/docs/reference/query-dsl/full-text-queries.asciidoc +++ b/docs/reference/query-dsl/full-text-queries.asciidoc @@ -18,7 +18,12 @@ The queries in this group are: <>:: - The poor man's _search-as-you-type_. Like the `match_phrase` query, but does a wildcard search on the final word. + Like the `match_phrase` query, but does a wildcard search on the final word. 
+ +<>:: + + Creates a `bool` query that matches each term as a `term` query, except for + the last term, which is matched as a `prefix` query <>:: @@ -50,6 +55,8 @@ include::match-phrase-query.asciidoc[] include::match-phrase-prefix-query.asciidoc[] +include::match-bool-prefix-query.asciidoc[] + include::multi-match-query.asciidoc[] include::common-terms-query.asciidoc[] diff --git a/docs/reference/query-dsl/match-bool-prefix-query.asciidoc b/docs/reference/query-dsl/match-bool-prefix-query.asciidoc new file mode 100644 index 0000000000000..623f2423d8055 --- /dev/null +++ b/docs/reference/query-dsl/match-bool-prefix-query.asciidoc @@ -0,0 +1,85 @@ +[[query-dsl-match-bool-prefix-query]] +=== Match Bool Prefix Query + +A `match_bool_prefix` query analyzes its input and constructs a +<> from the terms. Each term except the last +is used in a `term` query. The last term is used in a `prefix` query. A +`match_bool_prefix` query such as + +[source,js] +-------------------------------------------------- +GET /_search +{ + "query": { + "match_bool_prefix" : { + "message" : "quick brown f" + } + } +} +-------------------------------------------------- +// CONSOLE + +where analysis produces the terms `quick`, `brown`, and `f` is similar to the +following `bool` query + +[source,js] +-------------------------------------------------- +GET /_search +{ + "query": { + "bool" : { + "should": [ + { "term": { "message": "quick" }}, + { "term": { "message": "brown" }}, + { "prefix": { "message": "f"}} + ] + } + } +} +-------------------------------------------------- +// CONSOLE + +An important difference between the `match_bool_prefix` query and +<> is that the +`match_phrase_prefix` query matches its terms as a phrase, but the +`match_bool_prefix` query can match its terms in any position. The example +`match_bool_prefix` query above could match a field containing containing +`quick brown fox`, but it could also match `brown fox quick`. It could also +match a field containing the term `quick`, the term `brown` and a term +starting with `f`, appearing in any position. + +==== Parameters + +By default, `match_bool_prefix` queries' input text will be analyzed using the +analyzer from the queried field's mapping. A different search analyzer can be +configured with the `analyzer` parameter + +[source,js] +-------------------------------------------------- +GET /_search +{ + "query": { + "match_bool_prefix" : { + "message": { + "query": "quick brown f", + "analyzer": "keyword" + } + } + } +} +-------------------------------------------------- +// CONSOLE + +`match_bool_prefix` queries support the +<> and `operator` +parameters as described for the +<>, applying the setting to the +constructed `bool` query. The number of clauses in the constructed `bool` +query will in most cases be the number of terms produced by analysis of the +query text. + +The <>, `prefix_length`, +`max_expansions`, `fuzzy_transpositions`, and `fuzzy_rewrite` parameters can +be applied to the `term` subqueries constructed for all terms but the final +term. They do not have any effect on the prefix query constructed for the +final term. diff --git a/docs/reference/query-dsl/match-phrase-prefix-query.asciidoc b/docs/reference/query-dsl/match-phrase-prefix-query.asciidoc index 73f1be9143cf2..304eaf9a5b4f0 100644 --- a/docs/reference/query-dsl/match-phrase-prefix-query.asciidoc +++ b/docs/reference/query-dsl/match-phrase-prefix-query.asciidoc @@ -59,6 +59,6 @@ for appears. 
For better solutions for _search-as-you-type_ see the <> and -{defguide}/_index_time_search_as_you_type.html[Index-Time Search-as-You-Type]. +the <>. =================================================== diff --git a/docs/reference/query-dsl/match-query.asciidoc b/docs/reference/query-dsl/match-query.asciidoc index b939364f12027..64c356ccaa80c 100644 --- a/docs/reference/query-dsl/match-query.asciidoc +++ b/docs/reference/query-dsl/match-query.asciidoc @@ -202,7 +202,6 @@ process. It does not support field name prefixes, wildcard characters, or other "advanced" features. For this reason, chances of it failing are very small / non existent, and it provides an excellent behavior when it comes to just analyze and run that text as a query behavior (which is -usually what a text search box does). Also, the <> -type can provide a great "as you type" behavior to automatically load search results. +usually what a text search box does). ************************************************** diff --git a/docs/reference/query-dsl/multi-match-query.asciidoc b/docs/reference/query-dsl/multi-match-query.asciidoc index 512eee4900b41..b8fbb61a950d0 100644 --- a/docs/reference/query-dsl/multi-match-query.asciidoc +++ b/docs/reference/query-dsl/multi-match-query.asciidoc @@ -91,6 +91,10 @@ parameter, which can be set to: `phrase_prefix`:: Runs a `match_phrase_prefix` query on each field and combines the `_score` from each field. See <>. +`bool_prefix`:: Creates a `match_bool_prefix` query on each field and + combines the `_score` from each field. See + <>. + [[type-best-fields]] ==== `best_fields` @@ -516,3 +520,36 @@ per-term `blended` queries. It accepts: =================================================== The `fuzziness` parameter cannot be used with the `cross_fields` type. =================================================== + +[[type-bool-prefix]] +==== `bool_prefix` + +The `bool_prefix` type's scoring behaves like <>, but using a +<> instead of a +`match` query. + +[source,js] +-------------------------------------------------- +GET /_search +{ + "query": { + "multi_match" : { + "query": "quick brown f", + "type": "bool_prefix", + "fields": [ "subject", "message" ] + } + } +} +-------------------------------------------------- +// CONSOLE + +The `analyzer`, `boost`, `operator`, `minimum_should_match`, `lenient`, +`zero_terms_query`, and `auto_generate_synonyms_phrase_query` parameters as +explained in <> are supported. The +`fuzziness`, `prefix_length`, `max_expansions`, `rewrite`, and +`fuzzy_transpositions` parameters are supported for the terms that are used to +construct term queries, but do not have an effect on the prefix query +constructed from the final term. + +The `slop` and `cutoff_frequency` parameters are not supported by this query +type. diff --git a/docs/reference/query-dsl/script-score-query.asciidoc b/docs/reference/query-dsl/script-score-query.asciidoc index ee68d3e40fe13..56c4f7c41b8ee 100644 --- a/docs/reference/query-dsl/script-score-query.asciidoc +++ b/docs/reference/query-dsl/script-score-query.asciidoc @@ -182,60 +182,44 @@ different from the query's vector, 0 is used for missing dimensions in the calculations of vector functions. -[[random-functions]] -===== Random functions -There are two predefined ways to produce random values: -`randomNotReproducible` and `randomReproducible`. +[[random-score-function]] +===== Random score function +`random_score` function generates scores that are uniformly distributed +from 0 up to but not including 1. 
-`randomNotReproducible()` uses `java.util.Random` class -to generate a random value of the type `long`. -The generated values are not reproducible between requests' invocations. +`randomScore` function has the following syntax: +`randomScore(, )`. +It has a required parameter - `seed` as an integer value, +and an optional parameter - `fieldName` as a string value. [source,js] -------------------------------------------------- "script" : { - "source" : "randomNotReproducible()" + "source" : "randomScore(100, '_seq_no')" } -------------------------------------------------- // NOTCONSOLE - -`randomReproducible(String seedValue, int seed)` produces -reproducible random values of type `long`. This function requires -more computational time and memory than the non-reproducible version. - -A good candidate for the `seedValue` is document field values that -are unique across documents and already pre-calculated and preloaded -in the memory. For example, values of the document's `_seq_no` field -is a good candidate, as documents on the same shard have unique values -for the `_seq_no` field. +If the `fieldName` parameter is omitted, the internal Lucene +document ids will be used as a source of randomness. This is very efficient, +but unfortunately not reproducible since documents might be renumbered +by merges. [source,js] -------------------------------------------------- "script" : { - "source" : "randomReproducible(Long.toString(doc['_seq_no'].value), 100)" + "source" : "randomScore(100)" } -------------------------------------------------- // NOTCONSOLE -A drawback of using `_seq_no` is that generated values change if -documents are updated. Another drawback is not absolute uniqueness, as -documents from different shards with the same sequence numbers -generate the same random values. - -If you need random values to be distinct across different shards, -you can use a field with unique values across shards, -such as `_id`, but watch out for the memory usage as all -these unique values need to be loaded into memory. - -[source,js] --------------------------------------------------- -"script" : { - "source" : "randomReproducible(doc['_id'].value, 100)" -} --------------------------------------------------- -// NOTCONSOLE +Note that documents that are within the same shard and have the +same value for field will get the same score, so it is usually desirable +to use a field that has unique values for all documents across a shard. +A good default choice might be to use the `_seq_no` +field, whose only drawback is that scores will change if the document is +updated since update operations also update the value of the `_seq_no` field. [[decay-functions]] @@ -349,8 +333,8 @@ the following script: ===== `random_score` -Use `randomReproducible` and `randomNotReproducible` functions -as described in <>. +Use `randomScore` function +as described in <>. ===== `field_value_factor` diff --git a/docs/reference/rollup/apis/rollup-job-config.asciidoc b/docs/reference/rollup/apis/rollup-job-config.asciidoc index 885d4e82cf6b0..852f7b879fb38 100644 --- a/docs/reference/rollup/apis/rollup-job-config.asciidoc +++ b/docs/reference/rollup/apis/rollup-job-config.asciidoc @@ -69,7 +69,7 @@ In the above example, there are several pieces of logistical configuration for t `rollup_index` (required):: (string) The index that you wish to store rollup results into. All the rollup data that is generated by the job will be stored in this index. When searching the rollup data, this index will be used in the <> endpoint's URL. 
- The rollup index be shared with other rollup jobs. The data is stored so that it doesn't interfere with unrelated jobs. + The rollup index can be shared with other rollup jobs. The data is stored so that it doesn't interfere with unrelated jobs. `cron` (required):: (string) A cron string which defines when the rollup job should be executed. The cron string defines an interval of when to run diff --git a/docs/reference/search/suggesters/completion-suggest.asciidoc b/docs/reference/search/suggesters/completion-suggest.asciidoc index b27e6f0ef0b54..c89dce3d24160 100644 --- a/docs/reference/search/suggesters/completion-suggest.asciidoc +++ b/docs/reference/search/suggesters/completion-suggest.asciidoc @@ -2,7 +2,9 @@ === Completion Suggester NOTE: In order to understand the format of suggestions, please -read the <> page first. +read the <> page first. For more flexible +search-as-you-type searches that do not use suggesters, see the +<>. The `completion` suggester provides auto-complete/search-as-you-type functionality. This is a navigational feature to guide users to diff --git a/docs/reference/sql/endpoints/client-apps/dbeaver.asciidoc b/docs/reference/sql/endpoints/client-apps/dbeaver.asciidoc index 82b3402edfaf6..8a7792b525860 100644 --- a/docs/reference/sql/endpoints/client-apps/dbeaver.asciidoc +++ b/docs/reference/sql/endpoints/client-apps/dbeaver.asciidoc @@ -12,7 +12,7 @@ IMPORTANT: Elastic does not endorse, promote or provide support for this applica ==== Prerequisites -* DBeaver version 5.1.4 or higher +* DBeaver version 6.0.0 or higher * {es-sql} <> ==== New Connection diff --git a/docs/reference/sql/endpoints/client-apps/index.asciidoc b/docs/reference/sql/endpoints/client-apps/index.asciidoc index 87f3c2f609d3f..a84b8c2fb09e2 100644 --- a/docs/reference/sql/endpoints/client-apps/index.asciidoc +++ b/docs/reference/sql/endpoints/client-apps/index.asciidoc @@ -33,5 +33,5 @@ include::ps1.asciidoc[] include::microstrat.asciidoc[] include::qlik.asciidoc[] include::squirrel.asciidoc[] -include::tableau.asciidoc[] include::workbench.asciidoc[] +include::tableau.asciidoc[] diff --git a/docs/reference/sql/endpoints/jdbc.asciidoc b/docs/reference/sql/endpoints/jdbc.asciidoc index 9b4e2fa748197..37f3d59ef6410 100644 --- a/docs/reference/sql/endpoints/jdbc.asciidoc +++ b/docs/reference/sql/endpoints/jdbc.asciidoc @@ -124,7 +124,8 @@ Query timeout (in seconds). That is the maximum amount of time waiting for a que [float] ==== Mapping -`field.multi.value.leniency` (default `true`):: Whether to be lenient and return the first value for fields with multiple values (true) or throw an exception. +`field.multi.value.leniency` (default `true`):: Whether to be lenient and return the first value (without any guarantees of what that +will be - typically the first in natural ascending order) for fields with multiple values (true) or throw an exception. [float] ==== Additional diff --git a/docs/reference/sql/endpoints/odbc.asciidoc b/docs/reference/sql/endpoints/odbc.asciidoc index 1a7dd974281c8..fd92a37dca650 100644 --- a/docs/reference/sql/endpoints/odbc.asciidoc +++ b/docs/reference/sql/endpoints/odbc.asciidoc @@ -9,9 +9,12 @@ [float] === Overview -{odbc} is a feature-rich 3.80 ODBC driver for {es}. -It is a core level driver, exposing all of the functionality accessible through the {es}'s SQL ODBC API, converting ODBC calls into -{es-sql}. +{odbc} is a 3.80 compliant ODBC driver for {es}. 
+It is a core level driver, exposing all of the functionality accessible through +the {es}'s SQL API, converting ODBC calls into {es-sql}. + +In order to make use of the driver, the server must have {es-sql} installed and +running with the valid license. * <> * <> diff --git a/docs/reference/sql/endpoints/odbc/configuration.asciidoc b/docs/reference/sql/endpoints/odbc/configuration.asciidoc index 8bda67ce063d5..70ba437b64851 100644 --- a/docs/reference/sql/endpoints/odbc/configuration.asciidoc +++ b/docs/reference/sql/endpoints/odbc/configuration.asciidoc @@ -162,6 +162,8 @@ In case the server uses a certificate that is not part of the PKI, for example u + The driver will only read the contents of the file just before a connection is attempted. See <> section further on how to check the validity of the provided parameters. + +NOTE: The certificate file can not be bundled or password protected since the driver will not prompt for a password. ++ If using the file browser to locate the certificate - by pressing the _Browse..._ button - only files with _.pem_ and _.der_ extensions will be considered by default. Choose _All Files (\*.*)_ from the drop down, if your file ends with a different extension: + @@ -260,7 +262,95 @@ image:images/sql/odbc/env_var_log.png[] NOTE: When enabling the logging through the environment variable, the driver will create *one log file per process*. -Both ways of configuring the logging can coexist and both can use the same destination logging directory. However, one logging message -will only be logged once, the connection logging taking precedence over the environment variable logging. +Both ways of configuring the logging can coexist and both can use the same +destination logging directory. However, one logging message will only be logged +once, the connection logging taking precedence over the environment variable +logging. + +[[odbc-cfg-dsnparams]] +[float] +==== Connection string parameters + +The following is a list of additional parameters that can be configured for a +particular connection, in case the default behavior of the driver is not +suitable. This can be done within the client application, in a manner +particular to that application, generally in a free text input box (sometimes +named "Connection string", "String extras", or similar). The format of the +string is `Attribute1=Value1`. Multiple attributes can be specified, separated +by a semicolon `Attribute1=Value1;Attribute2=Value2;`. The attribute names are +given below. + +`Timeout` (default: `0`):: +The maximum time (in seconds) a request to the server can take. This can be +overridden by a larger statement-level timeout setting. The value 0 means no +timeout. + +`Follow` (default: `yes`):: +A boolean value (`yes`|`no` / `true`|`false` / `0`|`1`) controlling if the +driver will follow HTTP redirects. + + +`MaxFetchSize` (default: `0`):: +The maximum number of rows that {es-sql} server should send the driver for one +page. This corresponds to {es-sql}'s request parameter `fetch_size` (see +<>). The value 0 means server default. + + +`MaxBodySizeMB` (default: `100`):: +The maximum size (in megabytes) that an answer can grow to, before being +rejected as too large by the driver. +This is concerning the HTTP answer body of one page, not the cumulated data +volume that a query might generate. 
+ + +`ApplyTZ` (default: `no`):: +A boolean value controlling the timezone of: + +* the context in which the query will execute (especially relevant for functions dealing with timestamp components); + +* the timestamps received from / sent to the server. +If disabled, the UTC timezone will apply; otherwise, the local machine's set +timezone. + + +`ScientificFloats` (default: `default`):: +Controls how the floating point numbers will be printed, when these are +converted to string by the driver. Possible values given to this parameter: + +* `scientific`: the exponential notation (ex.: 1.23E01); + +* `default`: the default notation (ex.: 12.3); + +* `auto`: the driver will choose one of the above depending on the value to be +printed. +Note that the number of decimals is dependent on the precision (or ODBC scale) +of the value being printed and varies with the different floating point types +supported by {es-sql}. +This setting is not effective when the application fetches from the driver the +values as numbers and then does the conversion subsequently itself. + + +`VersionChecking` (default: `strict`):: +By default, the version of the driver and that of the server must be the same. +This parameter will allow a driver to connect to a server of different version. +The variation however can only be of the minor version, both endpoints must be +of same major version number. +Possible values: + +* `strict`: the versions must be in sync; + +* `major`: the versions must have the same major number. + +WARNING: This parameter can only be used for troubleshooting purposes. Running +with versions out of sync is not supported. + + +`MultiFieldLenient` (default: `true`):: +This boolean parameter controls the behavior of the server in case a +multi-value field is queried. In case this is set and the server encounters +such a field, it will pick a value in the set - without any guarantees of what +that will be, but typically the first in natural ascending order - and return +it as the value for the column. If not set, the server will return an error. +This corresponds to {es-sql}'s request parameter `field_multi_value_leniency` +(see <>). -// vim: set noet fenc=utf-8 ff=dos sts=0 sw=4 ts=4 tw=138 diff --git a/docs/reference/sql/endpoints/odbc/installation.asciidoc b/docs/reference/sql/endpoints/odbc/installation.asciidoc index 08f0c66ee2a8f..3a024e443d7c8 100644 --- a/docs/reference/sql/endpoints/odbc/installation.asciidoc +++ b/docs/reference/sql/endpoints/odbc/installation.asciidoc @@ -21,6 +21,11 @@ If you fail to meet any of the prerequisites the installer will show an error me NOTE: It is not possible to inline upgrade using the MSI. In order to upgrade, you will first have to uninstall the old driver and then install the new driver. +NOTE: When installing the MSI, the Windows Defender SmartScreen might warn +about running an unrecognized app. If the MSI has been downloaded from +Elastic's web site, it is safe to acknowledge the message by allowing the +installation to continue (`Run anyway`). + [[download]] ==== Download the `.msi` package(s) diff --git a/docs/reference/sql/functions/aggs.asciidoc b/docs/reference/sql/functions/aggs.asciidoc index c5ae050add9d7..cc0f06cb3bb5e 100644 --- a/docs/reference/sql/functions/aggs.asciidoc +++ b/docs/reference/sql/functions/aggs.asciidoc @@ -6,10 +6,12 @@ Functions for computing a _single_ result from a set of input values. {es-sql} supports aggregate functions only alongside <> (implicit or explicit). 
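
Before the per-function reference, here is a hedged sketch of running one of these aggregates through the {es-sql} JDBC driver, assuming the driver jar is on the classpath. The connection URL format, the `emp` index and its columns are illustrative assumptions only; `field.multi.value.leniency` is the JDBC property documented earlier in this diff.

[source,java]
--------------------------------------------------
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Properties;

public class AggQuerySketch {
    public static void main(String[] args) throws SQLException {
        // Assumed local cluster and URL format; adjust to your environment.
        String url = "jdbc:es://http://localhost:9200";

        Properties props = new Properties();
        // As documented above: return the first value of multi-valued fields (true)
        // or fail with an exception (false).
        props.setProperty("field.multi.value.leniency", "false");

        // "emp", "gender" and "salary" are placeholder index/column names.
        String query = "SELECT gender, AVG(salary) AS avg_salary FROM emp GROUP BY gender";

        try (Connection con = DriverManager.getConnection(url, props);
             Statement st = con.createStatement();
             ResultSet rs = st.executeQuery(query)) {
            while (rs.next()) {
                System.out.println(rs.getString(1) + " -> " + rs.getDouble(2));
            }
        }
    }
}
--------------------------------------------------
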
-==== General Purpose +[[sql-functions-aggs-general]] +[float] +=== General Purpose [[sql-functions-aggs-avg]] -===== `AVG` +==== `AVG` .Synopsis: [source, sql] @@ -33,7 +35,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggAvg] -------------------------------------------------- [[sql-functions-aggs-count]] -===== `COUNT` +==== `COUNT` .Synopsis: [source, sql] @@ -63,7 +65,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggCountStar] [[sql-functions-aggs-count-all]] -===== `COUNT(ALL)` +==== `COUNT(ALL)` .Synopsis: [source, sql] @@ -88,7 +90,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggCountAll] [[sql-functions-aggs-count-distinct]] -===== `COUNT(DISTINCT)` +==== `COUNT(DISTINCT)` .Synopsis: [source, sql] @@ -112,7 +114,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggCountDistinct] -------------------------------------------------- [[sql-functions-aggs-first]] -===== `FIRST/FIRST_VALUE` +==== `FIRST/FIRST_VALUE` .Synopsis: [source, sql] @@ -207,7 +209,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[firstValueWithTwoArgsAndGroupBy] the field is also <>. [[sql-functions-aggs-last]] -===== `LAST/LAST_VALUE` +==== `LAST/LAST_VALUE` .Synopsis: [source, sql] @@ -302,7 +304,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[lastValueWithTwoArgsAndGroupBy] the field is also <>. [[sql-functions-aggs-max]] -===== `MAX` +==== `MAX` .Synopsis: [source, sql] @@ -330,7 +332,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggMax] <> and therefore, it cannot be used in `HAVING` clause. [[sql-functions-aggs-min]] -===== `MIN` +==== `MIN` .Synopsis: [source, sql] @@ -358,7 +360,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggMin] <> and therefore, it cannot be used in `HAVING` clause. [[sql-functions-aggs-sum]] -===== `SUM` +==== `SUM` .Synopsis: [source, sql] @@ -381,10 +383,12 @@ Returns the sum of input values in the field `field_name`. 
include-tagged::{sql-specs}/docs/docs.csv-spec[aggSum] -------------------------------------------------- -==== Statistics +[[sql-functions-aggs-statistics]] +[float] +=== Statistics [[sql-functions-aggs-kurtosis]] -===== `KURTOSIS` +==== `KURTOSIS` .Synopsis: [source, sql] @@ -408,7 +412,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggKurtosis] -------------------------------------------------- [[sql-functions-aggs-mad]] -===== `MAD` +==== `MAD` .Synopsis: [source, sql] @@ -432,7 +436,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggMad] -------------------------------------------------- [[sql-functions-aggs-percentile]] -===== `PERCENTILE` +==== `PERCENTILE` .Synopsis: [source, sql] @@ -458,7 +462,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggPercentile] -------------------------------------------------- [[sql-functions-aggs-percentile-rank]] -===== `PERCENTILE_RANK` +==== `PERCENTILE_RANK` .Synopsis: [source, sql] @@ -484,7 +488,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggPercentileRank] -------------------------------------------------- [[sql-functions-aggs-skewness]] -===== `SKEWNESS` +==== `SKEWNESS` .Synopsis: [source, sql] @@ -508,7 +512,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggSkewness] -------------------------------------------------- [[sql-functions-aggs-stddev-pop]] -===== `STDDEV_POP` +==== `STDDEV_POP` .Synopsis: [source, sql] @@ -532,7 +536,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggStddevPop] -------------------------------------------------- [[sql-functions-aggs-sum-squares]] -===== `SUM_OF_SQUARES` +==== `SUM_OF_SQUARES` .Synopsis: [source, sql] @@ -556,7 +560,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[aggSumOfSquares] -------------------------------------------------- [[sql-functions-aggs-var-pop]] -===== `VAR_POP` +==== `VAR_POP` .Synopsis: [source, sql] diff --git a/docs/reference/sql/functions/conditional.asciidoc b/docs/reference/sql/functions/conditional.asciidoc index 122edf42feaab..ce8d5c3e66ced 100644 --- a/docs/reference/sql/functions/conditional.asciidoc +++ b/docs/reference/sql/functions/conditional.asciidoc @@ -45,14 +45,13 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[coalesceReturnNonNull] include-tagged::{sql-specs}/docs/docs.csv-spec[coalesceReturnNull] ---- - -[[sql-functions-conditional-ifnull]] -==== `IFNULL` +[[sql-functions-conditional-greatest]] +==== `GREATEST` .Synopsis: [source, sql] ---- -IFNULL(expression<1>, expression<2>) +GREATEST(expression<1>, expression<2>, ...) ---- *Input*: @@ -61,35 +60,39 @@ IFNULL(expression<1>, expression<2>) <2> 2nd expression +... -*Output*: 2nd expression if 1st expression is null, otherwise 1st expression. +**N**th expression + +GREATEST can take an arbitrary number of arguments and +all of them must be of the same data type. + +*Output*: one of the expressions or `null` .Description -Variant of <> with only two arguments. -Returns the first of its arguments that is not null. +Returns the argument that has the largest value which is not null. If all arguments are null, then it returns `null`. 
["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs/docs.csv-spec[ifNullReturnFirst] +include-tagged::{sql-specs}/docs/docs.csv-spec[greatestReturnNonNull] ---- ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs/docs.csv-spec[ifNullReturnSecond] +include-tagged::{sql-specs}/docs/docs.csv-spec[greatestReturnNull] ---- - -[[sql-functions-conditional-isnull]] -==== `ISNULL` +[[sql-functions-conditional-ifnull]] +==== `IFNULL` .Synopsis: [source, sql] ---- -ISNULL(expression<1>, expression<2>) +IFNULL(expression<1>, expression<2>) ---- *Input*: @@ -111,22 +114,22 @@ If all arguments are null, then it returns `null`. ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs/docs.csv-spec[isNullReturnFirst] +include-tagged::{sql-specs}/docs/docs.csv-spec[ifNullReturnFirst] ---- ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs/docs.csv-spec[isNullReturnSecond] +include-tagged::{sql-specs}/docs/docs.csv-spec[ifNullReturnSecond] ---- -[[sql-functions-conditional-nvl]] -==== `NVL` +[[sql-functions-conditional-isnull]] +==== `ISNULL` .Synopsis: [source, sql] ---- -NVL(expression<1>, expression<2>) +ISNULL(expression<1>, expression<2>) ---- *Input*: @@ -148,22 +151,22 @@ If all arguments are null, then it returns `null`. ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs/docs.csv-spec[nvlReturnFirst] +include-tagged::{sql-specs}/docs/docs.csv-spec[isNullReturnFirst] ---- ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs/docs.csv-spec[nvlReturnSecond] +include-tagged::{sql-specs}/docs/docs.csv-spec[isNullReturnSecond] ---- -[[sql-functions-conditional-nullif]] -==== `NULLIF` +[[sql-functions-conditional-least]] +==== `LEAST` .Synopsis: [source, sql] ---- -NULLIF(expression<1>, expression<2>) +LEAST(expression<1>, expression<2>, ...) ---- *Input*: @@ -172,33 +175,40 @@ NULLIF(expression<1>, expression<2>) <2> 2nd expression +... -*Output*: `null` if the 2 expressions are equal, otherwise the 1st expression. +**N**th expression + +LEAST can take an arbitrary number of arguments and +all of them must be of the same data type. + +*Output*: one of the expressions or `null` .Description -Returns `null` when the two input expressions are equal and -if not, it returns the 1st expression. +Returns the argument that has the smallest value which is not null. +If all arguments are null, then it returns `null`. + ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs/docs.csv-spec[nullIfReturnFirst] +include-tagged::{sql-specs}/docs/docs.csv-spec[leastReturnNonNull] ---- ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs/docs.csv-spec[nullIfReturnNull] +include-tagged::{sql-specs}/docs/docs.csv-spec[leastReturnNull] ---- -[[sql-functions-conditional-greatest]] -==== `GREATEST` +[[sql-functions-conditional-nullif]] +==== `NULLIF` .Synopsis: [source, sql] ---- -GREATEST(expression<1>, expression<2>, ...) +NULLIF(expression<1>, expression<2>) ---- *Input*: @@ -207,40 +217,33 @@ GREATEST(expression<1>, expression<2>, ...) <2> 2nd expression -... - -**N**th expression - -GREATEST can take an arbitrary number of arguments and -all of them must be of the same data type. -*Output*: one of the expressions or `null` +*Output*: `null` if the 2 expressions are equal, otherwise the 1st expression. 
.Description -Returns the argument that has the largest value which is not null. -If all arguments are null, then it returns `null`. - +Returns `null` when the two input expressions are equal and +if not, it returns the 1st expression. ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs/docs.csv-spec[greatestReturnNonNull] +include-tagged::{sql-specs}/docs/docs.csv-spec[nullIfReturnFirst] ---- ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs/docs.csv-spec[greatestReturnNull] +include-tagged::{sql-specs}/docs/docs.csv-spec[nullIfReturnNull] ---- -[[sql-functions-conditional-least]] -==== `LEAST` +[[sql-functions-conditional-nvl]] +==== `NVL` .Synopsis: [source, sql] ---- -LEAST(expression<1>, expression<2>, ...) +NVL(expression<1>, expression<2>) ---- *Input*: @@ -249,28 +252,25 @@ LEAST(expression<1>, expression<2>, ...) <2> 2nd expression -... - -**N**th expression - -LEAST can take an arbitrary number of arguments and -all of them must be of the same data type. -*Output*: one of the expressions or `null` +*Output*: 2nd expression if 1st expression is null, otherwise 1st expression. .Description -Returns the argument that has the smallest value which is not null. +Variant of <> with only two arguments. +Returns the first of its arguments that is not null. If all arguments are null, then it returns `null`. ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs/docs.csv-spec[leastReturnNonNull] +include-tagged::{sql-specs}/docs/docs.csv-spec[nvlReturnFirst] ---- ["source","sql",subs="attributes,callouts,macros"] ---- -include-tagged::{sql-specs}/docs/docs.csv-spec[leastReturnNull] +include-tagged::{sql-specs}/docs/docs.csv-spec[nvlReturnSecond] ---- + + diff --git a/docs/reference/sql/functions/index.asciidoc b/docs/reference/sql/functions/index.asciidoc index 94b5f767f86f9..6e966403ce0e9 100644 --- a/docs/reference/sql/functions/index.asciidoc +++ b/docs/reference/sql/functions/index.asciidoc @@ -7,14 +7,114 @@ * <> * <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> * <> +** <> * <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> * <> +** <> +** <> +** <> * <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> * <> -* <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> +* <> +** <> +** <> * <> +** <> +** <> +** <> +** <> +** <> +** <> +** <> * <> +** <> +** <> include::operators.asciidoc[] include::aggs.asciidoc[] diff --git a/docs/reference/sql/functions/math.asciidoc b/docs/reference/sql/functions/math.asciidoc index 895ba077b0961..a520a89ebaa2f 100644 --- a/docs/reference/sql/functions/math.asciidoc +++ b/docs/reference/sql/functions/math.asciidoc @@ -6,10 +6,12 @@ All math and trigonometric functions require their input (where applicable) to be numeric. 
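
Before the math function reference, a small Java sketch of the null-handling rules stated for the conditional functions above: `GREATEST` and `LEAST` ignore null arguments and return `null` only when every argument is null, while `NULLIF` returns `null` when its two arguments are equal. This illustrates the documented semantics only and is not {es-sql} code.

[source,java]
--------------------------------------------------
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.Objects;

public class ConditionalSemanticsSketch {

    /** GREATEST: largest non-null argument, or null if all arguments are null. */
    static <T extends Comparable<T>> T greatest(List<T> args) {
        return args.stream().filter(Objects::nonNull).max(Comparator.naturalOrder()).orElse(null);
    }

    /** LEAST: smallest non-null argument, or null if all arguments are null. */
    static <T extends Comparable<T>> T least(List<T> args) {
        return args.stream().filter(Objects::nonNull).min(Comparator.naturalOrder()).orElse(null);
    }

    /** NULLIF: null when both expressions are equal, otherwise the first expression. */
    static <T> T nullIf(T first, T second) {
        return Objects.equals(first, second) ? null : first;
    }

    public static void main(String[] args) {
        System.out.println(greatest(Arrays.asList(null, 5, 3)));          // 5
        System.out.println(least(Arrays.asList(null, 5, 3)));             // 3
        System.out.println(greatest(Arrays.<Integer>asList(null, null))); // null
        System.out.println(nullIf("a", "a"));                             // null
        System.out.println(nullIf("a", "b"));                             // a
    }
}
--------------------------------------------------
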
-==== Generic +[[sql-functions-math-generic]] +[float] +=== Generic [[sql-functions-math-abs]] -===== `ABS` +==== `ABS` .Synopsis: [source, sql] @@ -33,7 +35,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[abs] -------------------------------------------------- [[sql-functions-math-cbrt]] -===== `CBRT` +==== `CBRT` .Synopsis: [source, sql] @@ -57,7 +59,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineCbrtWithNegativeValue] -------------------------------------------------- [[sql-functions-math-ceil]] -===== `CEIL/CEILING` +==== `CEIL/CEILING` .Synopsis: [source, sql] @@ -81,7 +83,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineCeiling] -------------------------------------------------- [[sql-functions-math-e]] -===== `E` +==== `E` .Synopsis: [source, sql] @@ -103,7 +105,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathEulersNumber] -------------------------------------------------- [[sql-functions-math-exp]] -===== `EXP` +==== `EXP` .Synopsis: [source, sql] @@ -127,7 +129,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathExpInline] -------------------------------------------------- [[sql-functions-math-expm1]] -===== `EXPM1` +==== `EXPM1` .Synopsis: [source, sql] @@ -151,7 +153,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathExpm1Inline] -------------------------------------------------- [[sql-functions-math-floor]] -===== `FLOOR` +==== `FLOOR` .Synopsis: [source, sql] @@ -175,7 +177,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineFloor] -------------------------------------------------- [[sql-functions-math-log]] -===== `LOG` +==== `LOG` .Synopsis: [source, sql] @@ -199,7 +201,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineLog] -------------------------------------------------- [[sql-functions-math-log10]] -===== `LOG10` +==== `LOG10` .Synopsis: [source, sql] @@ -223,7 +225,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineLog10] -------------------------------------------------- [[sql-functions-math-pi]] -===== `PI` +==== `PI` .Synopsis: [source, sql] @@ -245,7 +247,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathPINumber] -------------------------------------------------- [[sql-functions-math-power]] -===== `POWER` +==== `POWER` .Synopsis: [source, sql] @@ -275,7 +277,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlinePowerNegative] -------------------------------------------------- [[sql-functions-math-random]] -===== `RANDOM/RAND` +==== `RANDOM/RAND` .Synopsis: [source, sql] @@ -299,7 +301,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathRandom] -------------------------------------------------- [[sql-functions-math-round]] -===== `ROUND` +==== `ROUND` .Synopsis: [source, sql] @@ -330,7 +332,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathRoundWithNegativeParameter] -------------------------------------------------- [[sql-functions-math-sign]] -===== `SIGN/SIGNUM` +==== `SIGN/SIGNUM` .Synopsis: [source, sql] @@ -355,7 +357,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineSign] [[sql-functions-math-sqrt]] -===== `SQRT` +==== `SQRT` .Synopsis: [source, sql] @@ -379,7 +381,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineSqrt] -------------------------------------------------- [[sql-functions-math-truncate]] -===== `TRUNCATE` +==== `TRUNCATE` .Synopsis: [source, sql] @@ -409,10 +411,12 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathTruncateWithPositiveParameter 
include-tagged::{sql-specs}/docs/docs.csv-spec[mathTruncateWithNegativeParameter] -------------------------------------------------- -==== Trigonometric +[[sql-functions-math-trigonometric]] +[float] +=== Trigonometric [[sql-functions-math-acos]] -===== `ACOS` +==== `ACOS` .Synopsis: [source, sql] @@ -436,7 +440,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineAcos] -------------------------------------------------- [[sql-functions-math-asin]] -===== `ASIN` +==== `ASIN` .Synopsis: [source, sql] @@ -460,7 +464,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineAsin] -------------------------------------------------- [[sql-functions-math-atan]] -===== `ATAN` +==== `ATAN` .Synopsis: [source, sql] @@ -484,7 +488,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineAtan] -------------------------------------------------- [[sql-functions-math-atan2]] -===== `ATAN2` +==== `ATAN2` .Synopsis: [source, sql] @@ -509,7 +513,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineAtan2] -------------------------------------------------- [[sql-functions-math-cos]] -===== `COS` +==== `COS` .Synopsis: [source, sql] @@ -533,7 +537,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineCosine] -------------------------------------------------- [[sql-functions-math-cosh]] -===== `COSH` +==== `COSH` .Synopsis: [source, sql] @@ -557,7 +561,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineCosh] -------------------------------------------------- [[sql-functions-math-cot]] -===== `COT` +==== `COT` .Synopsis: [source, sql] @@ -581,7 +585,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineCotangent] -------------------------------------------------- [[sql-functions-math-degrees]] -===== `DEGREES` +==== `DEGREES` .Synopsis: [source, sql] @@ -606,7 +610,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineDegrees] -------------------------------------------------- [[sql-functions-math-radians]] -===== `RADIANS` +==== `RADIANS` .Synopsis: [source, sql] @@ -631,7 +635,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineRadians] -------------------------------------------------- [[sql-functions-math-sin]] -===== `SIN` +==== `SIN` .Synopsis: [source, sql] @@ -655,7 +659,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineSine] -------------------------------------------------- [[sql-functions-math-sinh]] -===== `SINH` +==== `SINH` .Synopsis: [source, sql] @@ -679,7 +683,7 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[mathInlineSinh] -------------------------------------------------- [[sql-functions-math-tan]] -===== `TAN` +==== `TAN` .Synopsis: [source, sql] diff --git a/docs/reference/sql/functions/search.asciidoc b/docs/reference/sql/functions/search.asciidoc index 295fbfd220770..0534271caa91f 100644 --- a/docs/reference/sql/functions/search.asciidoc +++ b/docs/reference/sql/functions/search.asciidoc @@ -8,6 +8,118 @@ when the `MATCH` or `QUERY` predicates are being used. Outside a, so-called, search context, these functions will return default values such as `0` or `NULL`. 
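
To make the notion of a search context concrete, the hedged JDBC sketch below runs a query whose `WHERE` clause uses the `MATCH` predicate documented next, so that `SCORE()` returns a real relevance value instead of its default. The connection URL and the `library` index with its `name` column are assumptions made only for this example.

[source,java]
--------------------------------------------------
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

public class SearchContextSketch {
    public static void main(String[] args) throws SQLException {
        // Assumed URL format for the {es-sql} JDBC driver; adjust as needed.
        String url = "jdbc:es://http://localhost:9200";

        // MATCH() establishes a search context, so SCORE() carries a relevance value.
        // "library" and "name" are placeholder names for this sketch.
        String query = "SELECT name, SCORE() AS relevance "
            + "FROM library "
            + "WHERE MATCH(name, 'dune') "
            + "ORDER BY SCORE() DESC";

        try (Connection con = DriverManager.getConnection(url);
             Statement st = con.createStatement();
             ResultSet rs = st.executeQuery(query)) {
            while (rs.next()) {
                System.out.println(rs.getString("name") + " -> " + rs.getDouble("relevance"));
            }
        }
    }
}
--------------------------------------------------
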
+[[sql-functions-search-match]] +==== `MATCH` + +.Synopsis: +[source, sql] +-------------------------------------------------- +MATCH(field_exp<1>, constant_exp<2>[, options]<3>) +-------------------------------------------------- + +*Input*: + +<1> field(s) to match +<2> matching text +<3> additional parameters; optional + +.Description: + +A full-text search option, in the form of a predicate, available in {es-sql} that gives the user control over powerful <> +and <> {es} queries. + +The first parameter is the field or fields to match against. In case it receives one value only, {es-sql} will use a `match` query to perform the search: + +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs/docs.csv-spec[simpleMatch] +---- + +However, it can also receive a list of fields and their corresponding optional `boost` value. In this case, {es-sql} will use a +`multi_match` query to match the documents: + +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs/docs.csv-spec[multiFieldsMatch] +---- + +NOTE: The `multi_match` query in {es} has the option of <> that gives preferential weight +(in terms of scoring) to fields being searched in, using the `^` character. In the example above, the `name` field has a greater weight in +the final score than the `author` field when searching for `frank dune` text in both of them. + +Both options above can be used in combination with the optional third parameter of the `MATCH()` predicate, where one can specify +additional configuration parameters (separated by semicolon `;`) for either `match` or `multi_match` queries. For example: + +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs/docs.csv-spec[optionalParamsForMatch] +---- + +In the more advanced example above, the `cutoff_frequency` parameter allows specifying an absolute or relative document frequency where +high frequency terms are moved into an optional subquery and are only scored if one of the low frequency (below the cutoff) terms in the +case of an `or` operator or all of the low frequency terms in the case of an `and` operator match. More about this you can find in the +<> page. + +NOTE: The allowed optional parameters for a single-field `MATCH()` variant (for the `match` {es} query) are: `analyzer`, `auto_generate_synonyms_phrase_query`, +`cutoff_frequency`, `lenient`, `fuzziness`, `fuzzy_transpositions`, `fuzzy_rewrite`, `minimum_should_match`, `operator`, +`max_expansions`, `prefix_length`. + +NOTE: The allowed optional parameters for a multi-field `MATCH()` variant (for the `multi_match` {es} query) are: `analyzer`, `auto_generate_synonyms_phrase_query`, +`cutoff_frequency`, `lenient`, `fuzziness`, `fuzzy_transpositions`, `fuzzy_rewrite`, `minimum_should_match`, `operator`, +`max_expansions`, `prefix_length`, `slop`, `tie_breaker`, `type`. + + +[[sql-functions-search-query]] +==== `QUERY` + +.Synopsis: +[source, sql] +-------------------------------------------------- +QUERY(constant_exp<1>[, options]<2>) +-------------------------------------------------- + +*Input*: + +<1> query text +<2> additional parameters; optional + +.Description: + +Just like `MATCH`, `QUERY` is a full-text search predicate that gives the user control over the <> query in {es}. 
+ +The first parameter is basically the input that will be passed as is to the `query_string` query, which means that anything that `query_string` +accepts in its `query` field can be used here as well: + +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs/docs.csv-spec[simpleQueryQuery] +---- + +A more advanced example, showing more of the features that `query_string` supports, of course possible with {es-sql}: + +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs/docs.csv-spec[advancedQueryQuery] +---- + +The query above uses the `_exists_` query to select documents that have values in the `author` field, a range query for `page_count` and +regex and fuzziness queries for the `name` field. + +If one needs to customize various configuration options that `query_string` exposes, this can be done using the second _optional_ parameter. +Multiple settings can be specified separated by a semicolon `;`: + +["source","sql",subs="attributes,callouts,macros"] +---- +include-tagged::{sql-specs}/docs/docs.csv-spec[optionalParameterQuery] +---- + +NOTE: The allowed optional parameters for `QUERY()` are: `allow_leading_wildcard`, `analyze_wildcard`, `analyzer`, +`auto_generate_synonyms_phrase_query`, `default_field`, `default_operator`, `enable_position_increments`, +`escape`, `fuzziness`, `fuzzy_max_expansions`, `fuzzy_prefix_length`, `fuzzy_rewrite`, `fuzzy_transpositions`, +`lenient`, `max_determinized_states`, `minimum_should_match`, `phrase_slop`, `rewrite`, `quote_analyzer`, +`quote_field_suffix`, `tie_breaker`, `time_zone`, `type`. + + [[sql-functions-search-score]] ==== `SCORE` diff --git a/docs/reference/sql/language/data-types.asciidoc b/docs/reference/sql/language/data-types.asciidoc index e59fd528fd80a..42e5c842a4187 100644 --- a/docs/reference/sql/language/data-types.asciidoc +++ b/docs/reference/sql/language/data-types.asciidoc @@ -21,9 +21,9 @@ s|SQL precision | <> | long | BIGINT | 19 | <> | double | DOUBLE | 15 | <> | float | REAL | 7 -| <> | half_float | FLOAT | 16 -| <> | scaled_float | FLOAT | 19 -| <> | keyword | VARCHAR | based on <> +| <> | half_float | FLOAT | 3 +| <> | scaled_float | DOUBLE | 15 +| <> | keyword | VARCHAR | 32,766 | <> | text | VARCHAR | 2,147,483,647 | <> | binary | VARBINARY | 2,147,483,647 | <> | datetime | TIMESTAMP | 24 diff --git a/docs/reference/sql/language/syntax/commands/select.asciidoc b/docs/reference/sql/language/syntax/commands/select.asciidoc index 8e1715d98b15e..26fdb2f337ebc 100644 --- a/docs/reference/sql/language/syntax/commands/select.asciidoc +++ b/docs/reference/sql/language/syntax/commands/select.asciidoc @@ -71,7 +71,6 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[wildcardWithOrder] which essentially returns all(top-level fields, sub-fields, such as multi-fields are ignored] columns found. [[sql-syntax-from]] -[float] ==== FROM Clause The `FROM` clause specifies one table for the `SELECT` and has the following syntax: @@ -111,7 +110,6 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[fromTableAlias] ---- [[sql-syntax-where]] -[float] ==== WHERE Clause The optional `WHERE` clause is used to filter rows from the query and has the following syntax: @@ -133,7 +131,6 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[basicWhere] ---- [[sql-syntax-group-by]] -[float] ==== GROUP BY The `GROUP BY` clause is used to divide the results into groups of rows on matching values from the designated columns. 
It has the following syntax: @@ -208,7 +205,6 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[groupByAndMultipleAggs] ---- [[sql-syntax-group-by-implicit]] -[float] ===== Implicit Grouping When an aggregation is used without an associated `GROUP BY`, an __implicit grouping__ is applied, meaning all selected rows are considered to form a single default, or implicit group. @@ -229,7 +225,6 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[groupByImplicitMultipleAggs] ---- [[sql-syntax-having]] -[float] ==== HAVING The `HAVING` clause can be used _only_ along aggregate functions (and thus `GROUP BY`) to filter what groups are kept or not and has the following syntax: @@ -263,7 +258,6 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[groupByHavingMultiple] ---- [[sql-syntax-having-group-by-implicit]] -[float] ===== Implicit Grouping As indicated above, it is possible to have a `HAVING` clause without a `GROUP BY`. In this case, the so-called <> is applied, meaning all selected rows are considered to form a single group and `HAVING` can be applied on any of the aggregate functions specified on this group. @@ -285,7 +279,6 @@ include-tagged::{sql-specs}/docs/docs.csv-spec[groupByHavingImplicitMatch] [[sql-syntax-order-by]] -[float] ==== ORDER BY The `ORDER BY` clause is used to sort the results of `SELECT` by one or more expressions: @@ -373,7 +366,6 @@ Trying to return `score` from a non full-text query will return the same value f all are equally relevant. [[sql-syntax-limit]] -[float] ==== LIMIT The `LIMIT` clause restricts (limits) the number of rows returns using the format: diff --git a/docs/reference/upgrade/disable-shard-alloc.asciidoc b/docs/reference/upgrade/disable-shard-alloc.asciidoc index abd40336e9b08..839488f541f51 100644 --- a/docs/reference/upgrade/disable-shard-alloc.asciidoc +++ b/docs/reference/upgrade/disable-shard-alloc.asciidoc @@ -3,17 +3,18 @@ When you shut down a node, the allocation process waits for `index.unassigned.node_left.delayed_timeout` (by default, one minute) before starting to replicate the shards on that node to other nodes in the cluster, which can involve a lot of I/O. Since the node is shortly going to be -restarted, this I/O is unnecessary. You can avoid racing the clock by disabling -allocation before shutting down the node: +restarted, this I/O is unnecessary. You can avoid racing the clock by +<> of replicas before shutting down +the node: [source,js] -------------------------------------------------- PUT _cluster/settings { "persistent": { - "cluster.routing.allocation.enable": "none" + "cluster.routing.allocation.enable": "primaries" } } -------------------------------------------------- // CONSOLE -// TEST[skip:indexes don't assign] \ No newline at end of file +// TEST[skip:indexes don't assign] diff --git a/docs/ruby/client.asciidoc b/docs/ruby/client.asciidoc index 2037ae1a0b280..074c77d41b03b 100644 --- a/docs/ruby/client.asciidoc +++ b/docs/ruby/client.asciidoc @@ -1,3 +1,4 @@ +[[ruby_client]] == The Ruby Client The `elasticsearch` http://rubygems.org/gems/elasticsearch[Rubygem] provides a low-level client diff --git a/docs/ruby/copyright.asciidoc b/docs/ruby/copyright.asciidoc index 3747cc572e40f..8a84be27636f4 100644 --- a/docs/ruby/copyright.asciidoc +++ b/docs/ruby/copyright.asciidoc @@ -1,3 +1,4 @@ +[[copyright]] == Copyright and License This software is Copyright (c) 2013-2018 by Elasticsearch BV. 
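
Returning to the upgrade step above that switches `cluster.routing.allocation.enable` from `none` to `primaries`: the same request can be issued programmatically with the Java low-level REST client, as in the hedged sketch below. The host, port and scheme are assumptions; the JSON body is the one shown in the console snippet.

[source,java]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class DisableReplicaAllocationSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request request = new Request("PUT", "/_cluster/settings");
            // Allow primaries only, so replicas are not re-allocated while the node restarts.
            request.setJsonEntity(
                "{ \"persistent\": { \"cluster.routing.allocation.enable\": \"primaries\" } }");
            Response response = client.performRequest(request);
            System.out.println(response.getStatusLine());
        }
    }
}
--------------------------------------------------
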
diff --git a/docs/ruby/model.asciidoc b/docs/ruby/model.asciidoc index 0b0be45708fa8..62339bb239149 100644 --- a/docs/ruby/model.asciidoc +++ b/docs/ruby/model.asciidoc @@ -1,3 +1,4 @@ +[[activemodel_activerecord]] == ActiveModel / ActiveRecord The `elasticsearch-model` http://rubygems.org/gems/elasticsearch-model[Rubygem] diff --git a/docs/ruby/persistence.asciidoc b/docs/ruby/persistence.asciidoc index 7d361978ee703..5306dae47c661 100644 --- a/docs/ruby/persistence.asciidoc +++ b/docs/ruby/persistence.asciidoc @@ -1,3 +1,4 @@ +[[persistence]] == Persistence The `elasticsearch-persistence` http://rubygems.org/gems/elasticsearch-persistence[Rubygem] diff --git a/docs/ruby/rails.asciidoc b/docs/ruby/rails.asciidoc index 1fef3f42381a6..213258c7e2266 100644 --- a/docs/ruby/rails.asciidoc +++ b/docs/ruby/rails.asciidoc @@ -1,3 +1,4 @@ +[[ruby_on_rails]] == Ruby On Rails The `elasticsearch-rails` http://rubygems.org/gems/elasticsearch-rails[Rubygem] diff --git a/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java b/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java index b4a6c49754869..bbc6a64dcdb2b 100644 --- a/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java +++ b/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java @@ -19,11 +19,12 @@ package org.elasticsearch.smoketest; -import org.apache.http.HttpHost; -import org.apache.lucene.util.BytesRef; - import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; +import org.apache.http.HttpHost; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.TimeUnits; import org.elasticsearch.Version; import org.elasticsearch.client.RestClient; import org.elasticsearch.common.ParseField; @@ -48,12 +49,13 @@ import java.util.List; import java.util.Map; -import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; - import static java.util.Collections.emptyMap; import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +//The default 20 minutes timeout isn't always enough, please do not increase further than 30 before analyzing what makes this suite so slow +@TimeoutSuite(millis = 30 * TimeUnits.MINUTE) public class DocsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { public DocsClientYamlTestSuiteIT(@Name("yaml") ClientYamlTestCandidate testCandidate) { diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentSubParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentSubParser.java index e02f9f176246e..adcbf6ef1bee0 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentSubParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/XContentSubParser.java @@ -25,7 +25,7 @@ import java.util.Map; /** - * Wrapper for a XContentParser that makes a single object to look like a complete document. + * Wrapper for a XContentParser that makes a single object/array look like a complete document. * * The wrapper prevents the parsing logic to consume tokens outside of the wrapped object as well * as skipping to the end of the object in case of a parsing error. 
The wrapper is intended to be @@ -39,8 +39,8 @@ public class XContentSubParser implements XContentParser { public XContentSubParser(XContentParser parser) { this.parser = parser; - if (parser.currentToken() != Token.START_OBJECT) { - throw new IllegalStateException("The sub parser has to be created on the start of an object"); + if (parser.currentToken() != Token.START_OBJECT && parser.currentToken() != Token.START_ARRAY) { + throw new IllegalStateException("The sub parser has to be created on the start of an object or array"); } level = 1; } diff --git a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java index 51bb5c3c65f6d..fa6ffdd0407f9 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java +++ b/libs/x-content/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java @@ -151,6 +151,12 @@ public int intValue(boolean coerce) throws IOException { protected abstract int doIntValue() throws IOException; + private static BigInteger LONG_MAX_VALUE_AS_BIGINTEGER = BigInteger.valueOf(Long.MAX_VALUE); + private static BigInteger LONG_MIN_VALUE_AS_BIGINTEGER = BigInteger.valueOf(Long.MIN_VALUE); + // weak bounds on the BigDecimal representation to allow for coercion + private static BigDecimal BIGDECIMAL_GREATER_THAN_LONG_MAX_VALUE = BigDecimal.valueOf(Long.MAX_VALUE).add(BigDecimal.ONE); + private static BigDecimal BIGDECIMAL_LESS_THAN_LONG_MIN_VALUE = BigDecimal.valueOf(Long.MIN_VALUE).subtract(BigDecimal.ONE); + /** Return the long that {@code stringValue} stores or throws an exception if the * stored value cannot be converted to a long that stores the exact same * value and {@code coerce} is false. */ @@ -163,7 +169,11 @@ private static long toLong(String stringValue, boolean coerce) { final BigInteger bigIntegerValue; try { - BigDecimal bigDecimalValue = new BigDecimal(stringValue); + final BigDecimal bigDecimalValue = new BigDecimal(stringValue); + if (bigDecimalValue.compareTo(BIGDECIMAL_GREATER_THAN_LONG_MAX_VALUE) >= 0 || + bigDecimalValue.compareTo(BIGDECIMAL_LESS_THAN_LONG_MIN_VALUE) <= 0) { + throw new IllegalArgumentException("Value [" + stringValue + "] is out of range for a long"); + } bigIntegerValue = coerce ? 
bigDecimalValue.toBigInteger() : bigDecimalValue.toBigIntegerExact(); } catch (ArithmeticException e) { throw new IllegalArgumentException("Value [" + stringValue + "] has a decimal part"); @@ -171,11 +181,11 @@ private static long toLong(String stringValue, boolean coerce) { throw new IllegalArgumentException("For input string: \"" + stringValue + "\""); } - if (bigIntegerValue.compareTo(BigInteger.valueOf(Long.MAX_VALUE)) > 0 || - bigIntegerValue.compareTo(BigInteger.valueOf(Long.MIN_VALUE)) < 0) { + if (bigIntegerValue.compareTo(LONG_MAX_VALUE_AS_BIGINTEGER) > 0 || bigIntegerValue.compareTo(LONG_MIN_VALUE_AS_BIGINTEGER) < 0) { throw new IllegalArgumentException("Value [" + stringValue + "] is out of range for a long"); } + assert bigIntegerValue.longValueExact() <= Long.MAX_VALUE; // asserting that no ArithmeticException is thrown return bigIntegerValue.longValue(); } diff --git a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java index 5dbe7be40f312..0fe8a2b9f91fb 100644 --- a/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java +++ b/libs/x-content/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.common.xcontent; import com.fasterxml.jackson.core.JsonParseException; + import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; @@ -329,7 +330,7 @@ public void testNestedMapInList() throws IOException { } } - public void testSubParser() throws IOException { + public void testSubParserObject() throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder(); int numberOfTokens; numberOfTokens = generateRandomObjectForMarking(builder); @@ -354,6 +355,7 @@ public void testSubParser() throws IOException { // And sometimes skipping children subParser.skipChildren(); } + } finally { assertFalse(subParser.isClosed()); subParser.close(); @@ -367,6 +369,50 @@ public void testSubParser() throws IOException { } } + @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/40617") + public void testSubParserArray() throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder(); + int numberOfArrayElements = randomInt(10); + builder.startObject(); + builder.field("array"); + builder.startArray(); + int numberOfTokens = 0; + for (int i = 0; i < numberOfArrayElements; ++i) { + numberOfTokens += generateRandomObjectForMarking(builder); + } + builder.endArray(); + builder.endObject(); + + String content = Strings.toString(builder); + + try (XContentParser parser = createParser(JsonXContent.jsonXContent, content)) { + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); // array field + assertEquals("array", parser.currentName()); + assertEquals(XContentParser.Token.START_ARRAY, parser.nextToken()); // [ + XContentParser subParser = new XContentSubParser(parser); + try { + int tokensToSkip = randomInt(numberOfTokens - 1); + for (int i = 0; i < tokensToSkip; i++) { + // Simulate incomplete parsing + assertNotNull(subParser.nextToken()); + } + if (randomBoolean()) { + // And sometimes skipping children + subParser.skipChildren(); + } + + } finally { + assertFalse(subParser.isClosed()); + subParser.close(); + assertTrue(subParser.isClosed()); + } + 
assertEquals(XContentParser.Token.END_ARRAY, parser.currentToken()); + assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); + assertNull(parser.nextToken()); + } + } + public void testCreateSubParserAtAWrongPlace() throws IOException { XContentBuilder builder = XContentFactory.jsonBuilder(); generateRandomObjectForMarking(builder); @@ -377,7 +423,7 @@ public void testCreateSubParserAtAWrongPlace() throws IOException { assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); // first field assertEquals("first_field", parser.currentName()); IllegalStateException exception = expectThrows(IllegalStateException.class, () -> new XContentSubParser(parser)); - assertEquals("The sub parser has to be created on the start of an object", exception.getMessage()); + assertEquals("The sub parser has to be created on the start of an object or array", exception.getMessage()); } } diff --git a/modules/ingest-common/build.gradle b/modules/ingest-common/build.gradle index 1681258e7c7ee..b6179eb852ae9 100644 --- a/modules/ingest-common/build.gradle +++ b/modules/ingest-common/build.gradle @@ -29,9 +29,6 @@ dependencies { compile project(':libs:dissect') } -compileJava.options.compilerArgs << "-Xlint:-unchecked,-rawtypes" -compileTestJava.options.compilerArgs << "-Xlint:-unchecked,-rawtypes" - integTestCluster { module project(':modules:lang-painless') } \ No newline at end of file diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AbstractStringProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AbstractStringProcessor.java index 792e5e4ebed2d..546519aa5f606 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AbstractStringProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/AbstractStringProcessor.java @@ -80,8 +80,8 @@ protected Factory(String processorType) { } @Override - public AbstractStringProcessor create(Map registry, String tag, - Map config) throws Exception { + public AbstractStringProcessor create(Map registry, String tag, + Map config) throws Exception { String field = ConfigurationUtils.readStringProperty(processorType, tag, config, "field"); boolean ignoreMissing = ConfigurationUtils.readBooleanProperty(processorType, tag, config, "ignore_missing", false); String targetField = ConfigurationUtils.readStringProperty(processorType, tag, config, "target_field", field); @@ -89,7 +89,7 @@ public AbstractStringProcessor create(Map registry, S return newProcessor(tag, config, field, ignoreMissing, targetField); } - protected abstract AbstractStringProcessor newProcessor(String processorTag, Map config, String field, - boolean ignoreMissing, String targetField); + protected abstract AbstractStringProcessor newProcessor(String processorTag, Map config, String field, + boolean ignoreMissing, String targetField); } } diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/BytesProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/BytesProcessor.java index d07b56e1b3df5..8de75878f5fe5 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/BytesProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/BytesProcessor.java @@ -27,7 +27,7 @@ * Processor that converts the content of string fields to the byte value. 
* Throws exception is the field is not of type string or can not convert to the numeric byte value */ -public final class BytesProcessor extends AbstractStringProcessor { +public final class BytesProcessor extends AbstractStringProcessor { public static final String TYPE = "bytes"; diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GsubProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GsubProcessor.java index 39553910692fc..9f3e656bba4b6 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GsubProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GsubProcessor.java @@ -29,7 +29,7 @@ * Processor that allows to search for patterns in field content and replace them with corresponding string replacement. * Support fields of string type only, throws exception if a field is of a different type. */ -public final class GsubProcessor extends AbstractStringProcessor { +public final class GsubProcessor extends AbstractStringProcessor { public static final String TYPE = "gsub"; @@ -67,8 +67,8 @@ public Factory() { } @Override - protected AbstractStringProcessor newProcessor(String processorTag, Map config, String field, - boolean ignoreMissing, String targetField) { + protected GsubProcessor newProcessor(String processorTag, Map config, String field, + boolean ignoreMissing, String targetField) { String pattern = readStringProperty(TYPE, processorTag, config, "pattern"); String replacement = readStringProperty(TYPE, processorTag, config, "replacement"); Pattern searchPattern; diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/LowercaseProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/LowercaseProcessor.java index 4269cb05257f5..6c14dbdabba78 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/LowercaseProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/LowercaseProcessor.java @@ -27,7 +27,7 @@ * Throws exception is the field is not of type string. */ -public final class LowercaseProcessor extends AbstractStringProcessor { +public final class LowercaseProcessor extends AbstractStringProcessor { public static final String TYPE = "lowercase"; diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/TrimProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/TrimProcessor.java index 98fe1223e5391..d1b3c87785424 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/TrimProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/TrimProcessor.java @@ -25,7 +25,7 @@ * Processor that trims the content of string fields. * Throws exception is the field is not of type string. 
*/ -public final class TrimProcessor extends AbstractStringProcessor { +public final class TrimProcessor extends AbstractStringProcessor { public static final String TYPE = "trim"; diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/URLDecodeProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/URLDecodeProcessor.java index fb6c5acf98b24..fa9d377714ee9 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/URLDecodeProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/URLDecodeProcessor.java @@ -26,7 +26,7 @@ /** * Processor that URL-decodes a string */ -public final class URLDecodeProcessor extends AbstractStringProcessor { +public final class URLDecodeProcessor extends AbstractStringProcessor { public static final String TYPE = "urldecode"; diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/UppercaseProcessor.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/UppercaseProcessor.java index 6c428627c7d72..4503bfc02f71e 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/UppercaseProcessor.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/UppercaseProcessor.java @@ -26,7 +26,7 @@ * Processor that converts the content of string fields to uppercase. * Throws exception is the field is not of type string. */ -public final class UppercaseProcessor extends AbstractStringProcessor { +public final class UppercaseProcessor extends AbstractStringProcessor { public static final String TYPE = "uppercase"; diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorFactoryTestCase.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorFactoryTestCase.java index 0465e24902842..ba6a2be73465e 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorFactoryTestCase.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorFactoryTestCase.java @@ -37,7 +37,7 @@ protected Map modifyConfig(Map config) { return config; } - protected void assertProcessor(AbstractStringProcessor processor) {} + protected void assertProcessor(AbstractStringProcessor processor) {} public void testCreate() throws Exception { AbstractStringProcessor.Factory factory = newFactory(); @@ -47,7 +47,7 @@ public void testCreate() throws Exception { Map config = new HashMap<>(); config.put("field", fieldName); - AbstractStringProcessor processor = factory.create(null, processorTag, modifyConfig(config)); + AbstractStringProcessor processor = factory.create(null, processorTag, modifyConfig(config)); assertThat(processor.getTag(), equalTo(processorTag)); assertThat(processor.getField(), equalTo(fieldName)); assertThat(processor.isIgnoreMissing(), is(false)); @@ -64,7 +64,7 @@ public void testCreateWithIgnoreMissing() throws Exception { config.put("field", fieldName); config.put("ignore_missing", true); - AbstractStringProcessor processor = factory.create(null, processorTag, modifyConfig(config)); + AbstractStringProcessor processor = factory.create(null, processorTag, modifyConfig(config)); assertThat(processor.getTag(), equalTo(processorTag)); assertThat(processor.getField(), equalTo(fieldName)); assertThat(processor.isIgnoreMissing(), is(true)); @@ -82,7 +82,7 @@ public void testCreateWithTargetField() throws Exception { 
config.put("field", fieldName); config.put("target_field", targetFieldName); - AbstractStringProcessor processor = factory.create(null, processorTag, modifyConfig(config)); + AbstractStringProcessor processor = factory.create(null, processorTag, modifyConfig(config)); assertThat(processor.getTag(), equalTo(processorTag)); assertThat(processor.getField(), equalTo(fieldName)); assertThat(processor.isIgnoreMissing(), is(false)); diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorTestCase.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorTestCase.java index 4e4182bfdc891..f667f84e5d7b1 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorTestCase.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/AbstractStringProcessorTestCase.java @@ -33,7 +33,7 @@ public abstract class AbstractStringProcessorTestCase extends ESTestCase { - protected abstract AbstractStringProcessor newProcessor(String field, boolean ignoreMissing, String targetField); + protected abstract AbstractStringProcessor newProcessor(String field, boolean ignoreMissing, String targetField); protected String modifyInput(String input) { return input; @@ -41,8 +41,8 @@ protected String modifyInput(String input) { protected abstract T expectedResult(String input); - protected Class expectedResultType(){ - return (Class) String.class; // most results types are Strings + protected Class expectedResultType(){ + return String.class; // most results types are Strings } public void testProcessor() throws Exception { diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/BytesProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/BytesProcessorTests.java index 788340a455a42..2520f3e5ad17f 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/BytesProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/BytesProcessorTests.java @@ -29,12 +29,12 @@ import static org.hamcrest.Matchers.equalTo; -public class BytesProcessorTests extends AbstractStringProcessorTestCase { +public class BytesProcessorTests extends AbstractStringProcessorTestCase { private String modifiedInput; @Override - protected AbstractStringProcessor newProcessor(String field, boolean ignoreMissing, String targetField) { + protected AbstractStringProcessor newProcessor(String field, boolean ignoreMissing, String targetField) { return new BytesProcessor(randomAlphaOfLength(10), field, ignoreMissing, targetField); } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GsubProcessorFactoryTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GsubProcessorFactoryTests.java index 4a70b4686e0a6..0dadefbb4ee64 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GsubProcessorFactoryTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GsubProcessorFactoryTests.java @@ -42,7 +42,7 @@ protected Map modifyConfig(Map config) { } @Override - protected void assertProcessor(AbstractStringProcessor processor) { + protected void assertProcessor(AbstractStringProcessor processor) { GsubProcessor gsubProcessor = (GsubProcessor) processor; assertThat(gsubProcessor.getPattern().toString(), equalTo("\\.")); assertThat(gsubProcessor.getReplacement(), equalTo("-")); diff --git 
a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GsubProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GsubProcessorTests.java index 38d0202d3a1e2..9c003356c3dff 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GsubProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/GsubProcessorTests.java @@ -21,10 +21,10 @@ import java.util.regex.Pattern; -public class GsubProcessorTests extends AbstractStringProcessorTestCase { +public class GsubProcessorTests extends AbstractStringProcessorTestCase { @Override - protected AbstractStringProcessor newProcessor(String field, boolean ignoreMissing, String targetField) { + protected AbstractStringProcessor newProcessor(String field, boolean ignoreMissing, String targetField) { return new GsubProcessor(randomAlphaOfLength(10), field, Pattern.compile("\\."), "-", ignoreMissing, targetField); } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/LowercaseProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/LowercaseProcessorTests.java index 67a73669c0387..b804d3a0221c2 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/LowercaseProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/LowercaseProcessorTests.java @@ -21,9 +21,9 @@ import java.util.Locale; -public class LowercaseProcessorTests extends AbstractStringProcessorTestCase { +public class LowercaseProcessorTests extends AbstractStringProcessorTestCase { @Override - protected AbstractStringProcessor newProcessor(String field, boolean ignoreMissing, String targetField) { + protected AbstractStringProcessor newProcessor(String field, boolean ignoreMissing, String targetField) { return new LowercaseProcessor(randomAlphaOfLength(10), field, ignoreMissing, targetField); } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/TrimProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/TrimProcessorTests.java index f0ae554f5cad1..abd7cae12fe91 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/TrimProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/TrimProcessorTests.java @@ -19,10 +19,10 @@ package org.elasticsearch.ingest.common; -public class TrimProcessorTests extends AbstractStringProcessorTestCase { +public class TrimProcessorTests extends AbstractStringProcessorTestCase { @Override - protected AbstractStringProcessor newProcessor(String field, boolean ignoreMissing, String targetField) { + protected AbstractStringProcessor newProcessor(String field, boolean ignoreMissing, String targetField) { return new TrimProcessor(randomAlphaOfLength(10), field, ignoreMissing, targetField); } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/URLDecodeProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/URLDecodeProcessorTests.java index 7697f1fcba3d4..150d594afd9af 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/URLDecodeProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/URLDecodeProcessorTests.java @@ -22,14 +22,14 @@ import java.io.UnsupportedEncodingException; import java.net.URLDecoder; -public class URLDecodeProcessorTests extends AbstractStringProcessorTestCase { +public 
class URLDecodeProcessorTests extends AbstractStringProcessorTestCase { @Override protected String modifyInput(String input) { return "Hello%20G%C3%BCnter" + input; } @Override - protected AbstractStringProcessor newProcessor(String field, boolean ignoreMissing, String targetField) { + protected AbstractStringProcessor newProcessor(String field, boolean ignoreMissing, String targetField) { return new URLDecodeProcessor(randomAlphaOfLength(10), field, ignoreMissing, targetField); } diff --git a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/UppercaseProcessorTests.java b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/UppercaseProcessorTests.java index 76459f8116890..1b027c4380837 100644 --- a/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/UppercaseProcessorTests.java +++ b/modules/ingest-common/src/test/java/org/elasticsearch/ingest/common/UppercaseProcessorTests.java @@ -21,10 +21,10 @@ import java.util.Locale; -public class UppercaseProcessorTests extends AbstractStringProcessorTestCase { +public class UppercaseProcessorTests extends AbstractStringProcessorTestCase { @Override - protected AbstractStringProcessor newProcessor(String field, boolean ignoreMissing, String targetField) { + protected AbstractStringProcessor newProcessor(String field, boolean ignoreMissing, String targetField) { return new UppercaseProcessor(randomAlphaOfLength(10), field, ignoreMissing, targetField); } diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.score.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.score.txt index 3d7b29826c747..03ec9275aa8b7 100644 --- a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.score.txt +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.score.txt @@ -19,11 +19,14 @@ # This file contains a whitelist for functions to be used in Score context +class org.elasticsearch.script.ScoreScript no_import { +} + static_import { double saturation(double, double) from_class org.elasticsearch.script.ScoreScriptUtils double sigmoid(double, double, double) from_class org.elasticsearch.script.ScoreScriptUtils - double randomReproducible(String, int) from_class org.elasticsearch.script.ScoreScriptUtils - double randomNotReproducible() bound_to org.elasticsearch.script.ScoreScriptUtils$RandomNotReproducible + double randomScore(org.elasticsearch.script.ScoreScript, int, String) bound_to org.elasticsearch.script.ScoreScriptUtils$RandomScoreField + double randomScore(org.elasticsearch.script.ScoreScript, int) bound_to org.elasticsearch.script.ScoreScriptUtils$RandomScoreDoc double decayGeoLinear(String, String, String, double, GeoPoint) bound_to org.elasticsearch.script.ScoreScriptUtils$DecayGeoLinear double decayGeoExp(String, String, String, double, GeoPoint) bound_to org.elasticsearch.script.ScoreScriptUtils$DecayGeoExp double decayGeoGauss(String, String, String, double, GeoPoint) bound_to org.elasticsearch.script.ScoreScriptUtils$DecayGeoGauss diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/80_script_score.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/80_script_score.yml index a3135777c952c..cf55810058d92 100644 --- a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/80_script_score.yml +++ 
b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/80_script_score.yml @@ -72,61 +72,6 @@ setup: - match: { hits.hits.1._id: d2 } - match: { hits.hits.2._id: d1 } ---- -"Random functions": - - do: - indices.create: - index: test - body: - settings: - number_of_shards: 2 - mappings: - properties: - f1: - type: keyword - - do: - index: - index: test - id: 1 - body: {"f1": "v1"} - - do: - index: - index: test - id: 2 - body: {"f1": "v2"} - - do: - index: - index: test - id: 3 - body: {"f1": "v3"} - - - do: - indices.refresh: {} - - - do: - search: - rest_total_hits_as_int: true - index: test - body: - query: - script_score: - query: {match_all: {} } - script: - source: "randomReproducible(Long.toString(doc['_seq_no'].value), 100)" - - match: { hits.total: 3 } - - - do: - search: - rest_total_hits_as_int: true - index: test - body: - query: - script_score: - query: {match_all: {} } - script: - source: "randomNotReproducible()" - - match: { hits.total: 3 } - --- "Decay geo functions": - do: diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/85_script_score_random_score.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/85_script_score_random_score.yml new file mode 100644 index 0000000000000..2879d50fedebc --- /dev/null +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/85_script_score_random_score.yml @@ -0,0 +1,146 @@ +# Integration tests for ScriptScoreQuery using Painless + +setup: +- skip: + version: " - 7.99.99" # correct to 7.09.99 after backporting to 7.1 + reason: "random score function of script score was added in 7.1" + +--- +"Random score function with _seq_no field": + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 2 + mappings: + properties: + f1: + type: keyword + + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "test"}}' + - '{"f1": "v0"}' + - '{"index": {"_index": "test"}}' + - '{"f1": "v1"}' + - '{"index": {"_index": "test"}}' + - '{"f1": "v2"}' + - '{"index": {"_index": "test"}}' + - '{"f1": "v3"}' + - '{"index": {"_index": "test"}}' + - '{"f1": "v4"}' + - '{"index": {"_index": "test"}}' + - '{"f1": "v5"}' + - '{"index": {"_index": "test"}}' + - '{"f1": "v6"}' + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + script_score: + query: {match_all: {} } + script: + source: "randomScore(100, '_seq_no')" + # stash ids to check for reproducibility of ranking + - set: { hits.hits.0._id: id0 } + - set: { hits.hits.1._id: id1 } + - set: { hits.hits.2._id: id2 } + - set: { hits.hits.3._id: id3 } + - set: { hits.hits.4._id: id4 } + - set: { hits.hits.5._id: id5 } + - set: { hits.hits.6._id: id6 } + + # check that ranking is reproducible + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + script_score: + query: {match_all: {} } + script: + source: "randomScore(100, '_seq_no')" + - match: { hits.hits.0._id: $id0 } + - match: { hits.hits.1._id: $id1 } + - match: { hits.hits.2._id: $id2 } + - match: { hits.hits.3._id: $id3 } + - match: { hits.hits.4._id: $id4 } + - match: { hits.hits.5._id: $id5 } + - match: { hits.hits.6._id: $id6 } + +--- +"Random score function with internal doc Ids": + - do: + indices.create: + index: test + body: + settings: + number_of_shards: 1 + mappings: + properties: + f1: + type: keyword + + - do: + bulk: + refresh: true + body: + - '{"index": {"_index": "test"}}' + - '{"f1": "v0"}' + - '{"index": {"_index": "test"}}' + - '{"f1": "v1"}' + - '{"index": 
{"_index": "test"}}' + - '{"f1": "v2"}' + - '{"index": {"_index": "test"}}' + - '{"f1": "v3"}' + - '{"index": {"_index": "test"}}' + - '{"f1": "v4"}' + - '{"index": {"_index": "test"}}' + - '{"f1": "v5"}' + - '{"index": {"_index": "test"}}' + - '{"f1": "v6"}' + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + script_score: + query: {match_all: {} } + script: + source: "randomScore(100)" + # stash ids to check for reproducibility of ranking + - set: { hits.hits.0._id: id0 } + - set: { hits.hits.1._id: id1 } + - set: { hits.hits.2._id: id2 } + - set: { hits.hits.3._id: id3 } + - set: { hits.hits.4._id: id4 } + - set: { hits.hits.5._id: id5 } + - set: { hits.hits.6._id: id6 } + + # check that ranking is reproducible + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + script_score: + query: {match_all: {} } + script: + source: "randomScore(100)" + - match: { hits.hits.0._id: $id0 } + - match: { hits.hits.1._id: $id1 } + - match: { hits.hits.2._id: $id2 } + - match: { hits.hits.3._id: $id3 } + - match: { hits.hits.4._id: $id4 } + - match: { hits.hits.5._id: $id5 } + - match: { hits.hits.6._id: $id6 } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/DenseVectorFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/DenseVectorFieldMapper.java index f4a61c3ebd358..d48a457ba08cd 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/DenseVectorFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/DenseVectorFieldMapper.java @@ -46,7 +46,7 @@ public class DenseVectorFieldMapper extends FieldMapper implements ArrayValueMapperParser { public static final String CONTENT_TYPE = "dense_vector"; - public static short MAX_DIMS_COUNT = 500; //maximum allowed number of dimensions + public static short MAX_DIMS_COUNT = 1024; //maximum allowed number of dimensions private static final byte INT_BYTES = 4; public static class Defaults { @@ -169,10 +169,9 @@ public void parse(ParseContext context) throws IOException { buf[offset+2] = (byte) (intValue >> 8); buf[offset+3] = (byte) intValue; offset += INT_BYTES; - dim++; - if (dim >= MAX_DIMS_COUNT) { + if (dim++ >= MAX_DIMS_COUNT) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + - "] has exceeded the maximum allowed number of dimensions of :[" + MAX_DIMS_COUNT + "]"); + "] has exceeded the maximum allowed number of dimensions of [" + MAX_DIMS_COUNT + "]"); } } BinaryDocValuesField field = new BinaryDocValuesField(fieldType().name(), new BytesRef(buf, 0, offset)); diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MapperExtrasPlugin.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MapperExtrasPlugin.java index cbafd0fd1efff..45a067d7994d2 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MapperExtrasPlugin.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/MapperExtrasPlugin.java @@ -41,6 +41,7 @@ public Map getMappers() { mappers.put(RankFeaturesFieldMapper.CONTENT_TYPE, new RankFeaturesFieldMapper.TypeParser()); mappers.put(DenseVectorFieldMapper.CONTENT_TYPE, new DenseVectorFieldMapper.TypeParser()); mappers.put(SparseVectorFieldMapper.CONTENT_TYPE, new SparseVectorFieldMapper.TypeParser()); + mappers.put(SearchAsYouTypeFieldMapper.CONTENT_TYPE, new SearchAsYouTypeFieldMapper.TypeParser()); return 
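Editor's note: to make the intent of the whitelist change and the YAML test above concrete — randomScore with a field argument is reproducible because the score is a deterministic function of the seed and a per-document value — here is a conceptual, self-contained sketch. It illustrates the reproducibility property only; it is not the Painless implementation, and the hash used is arbitrary.

// Conceptual sketch: a score that is a pure function of (seed, per-document value) yields the
// same ordering on every run, which is exactly what the YAML test stashes and re-checks.
import java.util.Comparator;
import java.util.List;
import java.util.stream.Collectors;

final class ReproducibleRandomScoreSketch {

    // mix the seed and the per-document value into [0, 1); any stable hash works for the illustration
    static double score(int seed, long perDocValue) {
        long h = 0x9E3779B97F4A7C15L * (perDocValue ^ (31L * seed));
        return (h >>> 11) / (double) (1L << 53);
    }

    public static void main(String[] args) {
        List<Long> seqNos = List.of(0L, 1L, 2L, 3L, 4L, 5L, 6L);
        Comparator<Long> bySeededScore = Comparator.comparingDouble((Long s) -> score(100, s)).reversed();
        List<Long> firstRun = seqNos.stream().sorted(bySeededScore).collect(Collectors.toList());
        List<Long> secondRun = seqNos.stream().sorted(bySeededScore).collect(Collectors.toList());
        // same seed and same per-document values => same ranking
        System.out.println(firstRun.equals(secondRun)); // true
    }
}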
Collections.unmodifiableMap(mappers); } diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SearchAsYouTypeFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SearchAsYouTypeFieldMapper.java new file mode 100644 index 0000000000000..867e975e9f51c --- /dev/null +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SearchAsYouTypeFieldMapper.java @@ -0,0 +1,836 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.AnalyzerWrapper; +import org.apache.lucene.analysis.CachingTokenFilter; +import org.apache.lucene.analysis.TokenFilter; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter; +import org.apache.lucene.analysis.shingle.FixedShingleFilter; +import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; +import org.apache.lucene.document.Field; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.AutomatonQuery; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.ConstantScoreQuery; +import org.apache.lucene.search.MultiTermQuery; +import org.apache.lucene.search.NormsFieldExistsQuery; +import org.apache.lucene.search.PrefixQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.spans.FieldMaskingSpanQuery; +import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper; +import org.apache.lucene.search.spans.SpanQuery; +import org.apache.lucene.search.spans.SpanTermQuery; +import org.apache.lucene.util.automaton.Automata; +import org.apache.lucene.util.automaton.Automaton; +import org.apache.lucene.util.automaton.Operations; +import org.elasticsearch.common.collect.Iterators; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.analysis.AnalyzerScope; +import org.elasticsearch.index.analysis.NamedAnalyzer; +import org.elasticsearch.index.query.QueryShardContext; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeIntegerValue; +import static org.elasticsearch.index.mapper.TextFieldMapper.TextFieldType.hasGaps; +import static org.elasticsearch.index.mapper.TypeParsers.parseTextField; + +/** + * Mapper for a text field that 
optimizes itself for as-you-type completion by indexing its content into subfields. Each subfield + * modifies the analysis chain of the root field to index terms the user would create as they type out the value in the root field + * + * The structure of these fields is + + * <pre>

+ *     [ SearchAsYouTypeFieldMapper, SearchAsYouTypeFieldType, unmodified analysis ]
+ *     ├── [ ShingleFieldMapper, ShingleFieldType, analysis wrapped with 2-shingles ]
+ *     ├── ...
+ *     ├── [ ShingleFieldMapper, ShingleFieldType, analysis wrapped with max_shingle_size-shingles ]
+ *     └── [ PrefixFieldMapper, PrefixFieldType, analysis wrapped with max_shingle_size-shingles and edge-ngrams ]
+ * </pre>
+ */ +public class SearchAsYouTypeFieldMapper extends FieldMapper { + + public static final String CONTENT_TYPE = "search_as_you_type"; + private static final int MAX_SHINGLE_SIZE_LOWER_BOUND = 2; + private static final int MAX_SHINGLE_SIZE_UPPER_BOUND = 4; + private static final String PREFIX_FIELD_SUFFIX = "._index_prefix"; + + public static class Defaults { + + public static final int MIN_GRAM = 1; + public static final int MAX_GRAM = 20; + public static final int MAX_SHINGLE_SIZE = 3; + + public static final MappedFieldType FIELD_TYPE = new SearchAsYouTypeFieldType(); + + static { + FIELD_TYPE.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS); + FIELD_TYPE.freeze(); + } + } + + public static class TypeParser implements Mapper.TypeParser { + + @Override + public Mapper.Builder parse(String name, + Map node, + ParserContext parserContext) throws MapperParsingException { + + final Builder builder = new Builder(name); + + builder.fieldType().setIndexAnalyzer(parserContext.getIndexAnalyzers().getDefaultIndexAnalyzer()); + builder.fieldType().setSearchAnalyzer(parserContext.getIndexAnalyzers().getDefaultSearchAnalyzer()); + builder.fieldType().setSearchQuoteAnalyzer(parserContext.getIndexAnalyzers().getDefaultSearchQuoteAnalyzer()); + parseTextField(builder, name, node, parserContext); + for (Iterator> iterator = node.entrySet().iterator(); iterator.hasNext();) { + final Map.Entry entry = iterator.next(); + final String fieldName = entry.getKey(); + final Object fieldNode = entry.getValue(); + + if (fieldName.equals("max_shingle_size")) { + builder.maxShingleSize(nodeIntegerValue(fieldNode)); + iterator.remove(); + } + // TODO should we allow to configure the prefix field + } + return builder; + } + } + + public static class Builder extends FieldMapper.Builder { + private int maxShingleSize = Defaults.MAX_SHINGLE_SIZE; + + public Builder(String name) { + super(name, Defaults.FIELD_TYPE, Defaults.FIELD_TYPE); + this.builder = this; + } + + public Builder maxShingleSize(int maxShingleSize) { + if (maxShingleSize < MAX_SHINGLE_SIZE_LOWER_BOUND || maxShingleSize > MAX_SHINGLE_SIZE_UPPER_BOUND) { + throw new MapperParsingException("[max_shingle_size] must be at least [" + MAX_SHINGLE_SIZE_LOWER_BOUND + "] and at most " + + "[" + MAX_SHINGLE_SIZE_UPPER_BOUND + "], got [" + maxShingleSize + "]"); + } + this.maxShingleSize = maxShingleSize; + return builder; + } + + @Override + public SearchAsYouTypeFieldType fieldType() { + return (SearchAsYouTypeFieldType) this.fieldType; + } + + @Override + public SearchAsYouTypeFieldMapper build(Mapper.BuilderContext context) { + setupFieldType(context); + + final NamedAnalyzer indexAnalyzer = fieldType().indexAnalyzer(); + final NamedAnalyzer searchAnalyzer = fieldType().searchAnalyzer(); + final NamedAnalyzer searchQuoteAnalyzer = fieldType().searchQuoteAnalyzer(); + + // set up the prefix field + final String prefixFieldName = name() + PREFIX_FIELD_SUFFIX; + final PrefixFieldType prefixFieldType = new PrefixFieldType(name(), prefixFieldName, Defaults.MIN_GRAM, Defaults.MAX_GRAM); + prefixFieldType.setIndexOptions(fieldType().indexOptions()); + // wrap the root field's index analyzer with shingles and edge ngrams + final SearchAsYouTypeAnalyzer prefixIndexWrapper = + SearchAsYouTypeAnalyzer.withShingleAndPrefix(indexAnalyzer.analyzer(), maxShingleSize); + // wrap the root field's search analyzer with only shingles + final SearchAsYouTypeAnalyzer prefixSearchWrapper = + SearchAsYouTypeAnalyzer.withShingle(searchAnalyzer.analyzer(), maxShingleSize); + 
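Editor's note: for orientation, a hedged sketch (plain Java, invented class name) of the subfield layout the builder above creates for a search_as_you_type field named "title" (the field name is arbitrary) with the default max_shingle_size of 3. The naming mirrors getShingleFieldName(parent, size) = parent + "._" + size + "gram" and the "._index_prefix" suffix defined in this mapper; the prefix subfield additionally applies edge n-grams (min 1, max 20).

final class SearchAsYouTypeSubfieldsSketch {
    public static void main(String[] args) {
        String parent = "title";
        int maxShingleSize = 3; // Defaults.MAX_SHINGLE_SIZE
        System.out.println(parent + "                (root field, unmodified analysis)");
        for (int shingleSize = 2; shingleSize <= maxShingleSize; shingleSize++) {
            System.out.println(parent + "._" + shingleSize + "gram   (analysis wrapped with " + shingleSize + "-shingles)");
        }
        System.out.println(parent + "._index_prefix  (shingles of size " + maxShingleSize + " plus edge n-grams)");
    }
}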
// don't wrap the root field's search quote analyzer as prefix field doesn't support phrase queries + prefixFieldType.setIndexAnalyzer(new NamedAnalyzer(indexAnalyzer.name(), AnalyzerScope.INDEX, prefixIndexWrapper)); + prefixFieldType.setSearchAnalyzer(new NamedAnalyzer(searchAnalyzer.name(), AnalyzerScope.INDEX, prefixSearchWrapper)); + final PrefixFieldMapper prefixFieldMapper = new PrefixFieldMapper(prefixFieldType, context.indexSettings()); + + // set up the shingle fields + final ShingleFieldMapper[] shingleFieldMappers = new ShingleFieldMapper[maxShingleSize - 1]; + final ShingleFieldType[] shingleFieldTypes = new ShingleFieldType[maxShingleSize - 1]; + for (int i = 0; i < shingleFieldMappers.length; i++) { + final int shingleSize = i + 2; + final ShingleFieldType shingleFieldType = new ShingleFieldType(fieldType(), shingleSize); + shingleFieldType.setName(getShingleFieldName(name(), shingleSize)); + // wrap the root field's index, search, and search quote analyzers with shingles + final SearchAsYouTypeAnalyzer shingleIndexWrapper = + SearchAsYouTypeAnalyzer.withShingle(indexAnalyzer.analyzer(), shingleSize); + final SearchAsYouTypeAnalyzer shingleSearchWrapper = + SearchAsYouTypeAnalyzer.withShingle(searchAnalyzer.analyzer(), shingleSize); + final SearchAsYouTypeAnalyzer shingleSearchQuoteWrapper = + SearchAsYouTypeAnalyzer.withShingle(searchQuoteAnalyzer.analyzer(), shingleSize); + shingleFieldType.setIndexAnalyzer(new NamedAnalyzer(indexAnalyzer.name(), AnalyzerScope.INDEX, shingleIndexWrapper)); + shingleFieldType.setSearchAnalyzer(new NamedAnalyzer(searchAnalyzer.name(), AnalyzerScope.INDEX, shingleSearchWrapper)); + shingleFieldType.setSearchQuoteAnalyzer( + new NamedAnalyzer(searchQuoteAnalyzer.name(), AnalyzerScope.INDEX, shingleSearchQuoteWrapper)); + shingleFieldType.setPrefixFieldType(prefixFieldType); + shingleFieldTypes[i] = shingleFieldType; + shingleFieldMappers[i] = new ShingleFieldMapper(shingleFieldType, context.indexSettings()); + } + fieldType().setPrefixField(prefixFieldType); + fieldType().setShingleFields(shingleFieldTypes); + return new SearchAsYouTypeFieldMapper(name, fieldType(), context.indexSettings(), copyTo, + maxShingleSize, prefixFieldMapper, shingleFieldMappers); + } + } + + private static int countPosition(TokenStream stream) throws IOException { + assert stream instanceof CachingTokenFilter; + PositionIncrementAttribute posIncAtt = stream.getAttribute(PositionIncrementAttribute.class); + stream.reset(); + int positionCount = 0; + while (stream.incrementToken()) { + if (posIncAtt.getPositionIncrement() != 0) { + positionCount += posIncAtt.getPositionIncrement(); + } + } + return positionCount; + } + + /** + * The root field type, which most queries should target as it will delegate queries to subfields better optimized for the query. When + * handling phrase queries, it analyzes the query text to find the appropriate sized shingle subfield to delegate to. 
When handling + * prefix or phrase prefix queries, it delegates to the prefix subfield + */ + static class SearchAsYouTypeFieldType extends StringFieldType { + + PrefixFieldType prefixField; + ShingleFieldType[] shingleFields = new ShingleFieldType[0]; + + SearchAsYouTypeFieldType() { + setTokenized(true); + } + + SearchAsYouTypeFieldType(SearchAsYouTypeFieldType other) { + super(other); + + if (other.prefixField != null) { + this.prefixField = other.prefixField.clone(); + } + if (other.shingleFields != null) { + this.shingleFields = new ShingleFieldType[other.shingleFields.length]; + for (int i = 0; i < this.shingleFields.length; i++) { + if (other.shingleFields[i] != null) { + this.shingleFields[i] = other.shingleFields[i].clone(); + } + } + } + } + + public void setPrefixField(PrefixFieldType prefixField) { + checkIfFrozen(); + this.prefixField = prefixField; + } + + public void setShingleFields(ShingleFieldType[] shingleFields) { + checkIfFrozen(); + this.shingleFields = shingleFields; + } + + @Override + public MappedFieldType clone() { + return new SearchAsYouTypeFieldType(this); + } + + @Override + public String typeName() { + return CONTENT_TYPE; + } + + private ShingleFieldType shingleFieldForPositions(int positions) { + final int indexFromShingleSize = Math.max(positions - 2, 0); + return shingleFields[Math.min(indexFromShingleSize, shingleFields.length - 1)]; + } + + @Override + public Query existsQuery(QueryShardContext context) { + if (omitNorms()) { + return new TermQuery(new Term(FieldNamesFieldMapper.NAME, name())); + } else { + return new NormsFieldExistsQuery(name()); + } + } + + @Override + public Query prefixQuery(String value, MultiTermQuery.RewriteMethod method, QueryShardContext context) { + if (prefixField == null || prefixField.termLengthWithinBounds(value.length()) == false) { + return super.prefixQuery(value, method, context); + } else { + final Query query = prefixField.prefixQuery(value, method, context); + if (method == null + || method == MultiTermQuery.CONSTANT_SCORE_REWRITE + || method == MultiTermQuery.CONSTANT_SCORE_BOOLEAN_REWRITE) { + return new ConstantScoreQuery(query); + } else { + return query; + } + } + } + + @Override + public Query phraseQuery(TokenStream stream, int slop, boolean enablePositionIncrements) throws IOException { + int numPos = countPosition(stream); + if (shingleFields.length == 0 || slop > 0 || hasGaps(stream) || numPos <= 1) { + return TextFieldMapper.createPhraseQuery(stream, name(), slop, enablePositionIncrements); + } + final ShingleFieldType shingleField = shingleFieldForPositions(numPos); + stream = new FixedShingleFilter(stream, shingleField.shingleSize); + return shingleField.phraseQuery(stream, 0, true); + } + + @Override + public Query multiPhraseQuery(TokenStream stream, int slop, boolean enablePositionIncrements) throws IOException { + int numPos = countPosition(stream); + if (shingleFields.length == 0 || slop > 0 || hasGaps(stream) || numPos <= 1) { + return TextFieldMapper.createPhraseQuery(stream, name(), slop, enablePositionIncrements); + } + final ShingleFieldType shingleField = shingleFieldForPositions(numPos); + stream = new FixedShingleFilter(stream, shingleField.shingleSize); + return shingleField.multiPhraseQuery(stream, 0, true); + } + + @Override + public Query phrasePrefixQuery(TokenStream stream, int slop, int maxExpansions) throws IOException { + int numPos = countPosition(stream); + if (shingleFields.length == 0 || slop > 0 || hasGaps(stream) || numPos <= 1) { + return 
TextFieldMapper.createPhrasePrefixQuery(stream, name(), slop, maxExpansions, + null, null); + } + final ShingleFieldType shingleField = shingleFieldForPositions(numPos); + stream = new FixedShingleFilter(stream, shingleField.shingleSize); + return shingleField.phrasePrefixQuery(stream, 0, maxExpansions); + } + + @Override + public SpanQuery spanPrefixQuery(String value, SpanMultiTermQueryWrapper.SpanRewriteMethod method, QueryShardContext context) { + if (prefixField != null && prefixField.termLengthWithinBounds(value.length())) { + return new FieldMaskingSpanQuery(new SpanTermQuery(new Term(prefixField.name(), indexedValueForSearch(value))), name()); + } else { + SpanMultiTermQueryWrapper spanMulti = + new SpanMultiTermQueryWrapper<>(new PrefixQuery(new Term(name(), indexedValueForSearch(value)))); + spanMulti.setRewriteMethod(method); + return spanMulti; + } + } + + @Override + public void checkCompatibility(MappedFieldType other, List conflicts) { + super.checkCompatibility(other, conflicts); + final SearchAsYouTypeFieldType otherFieldType = (SearchAsYouTypeFieldType) other; + if (this.shingleFields.length != otherFieldType.shingleFields.length) { + conflicts.add("mapper [" + name() + "] has a different [max_shingle_size]"); + } else if (Arrays.equals(this.shingleFields, otherFieldType.shingleFields) == false) { + conflicts.add("mapper [" + name() + "] has shingle subfields that are configured differently"); + } + + if (Objects.equals(this.prefixField, otherFieldType.prefixField) == false) { + conflicts.add("mapper [" + name() + "] has different [index_prefixes] settings"); + } + } + + @Override + public boolean equals(Object otherObject) { + if (this == otherObject) { + return true; + } + if (otherObject == null || getClass() != otherObject.getClass()) { + return false; + } + if (!super.equals(otherObject)) { + return false; + } + final SearchAsYouTypeFieldType other = (SearchAsYouTypeFieldType) otherObject; + return Objects.equals(prefixField, other.prefixField) && + Arrays.equals(shingleFields, other.shingleFields); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), prefixField, Arrays.hashCode(shingleFields)); + } + } + + /** + * The prefix field type handles prefix and phrase prefix queries that are delegated to it by the other field types in a + * search_as_you_type structure + */ + static final class PrefixFieldType extends StringFieldType { + + final int minChars; + final int maxChars; + final String parentField; + + PrefixFieldType(String parentField, String name, int minChars, int maxChars) { + setTokenized(true); + setOmitNorms(true); + setStored(false); + setName(name); + this.minChars = minChars; + this.maxChars = maxChars; + this.parentField = parentField; + } + + PrefixFieldType(PrefixFieldType other) { + super(other); + this.minChars = other.minChars; + this.maxChars = other.maxChars; + this.parentField = other.parentField; + } + + boolean termLengthWithinBounds(int length) { + return length >= minChars - 1 && length <= maxChars; + } + + @Override + public Query prefixQuery(String value, MultiTermQuery.RewriteMethod method, QueryShardContext context) { + if (value.length() >= minChars) { + return super.termQuery(value, context); + } + List automata = new ArrayList<>(); + automata.add(Automata.makeString(value)); + for (int i = value.length(); i < minChars; i++) { + automata.add(Automata.makeAnyChar()); + } + Automaton automaton = Operations.concatenate(automata); + AutomatonQuery query = new AutomatonQuery(new Term(name(), value + "*"), 
automaton); + query.setRewriteMethod(method); + return new BooleanQuery.Builder() + .add(query, BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term(parentField, value)), BooleanClause.Occur.SHOULD) + .build(); + } + + @Override + public PrefixFieldType clone() { + return new PrefixFieldType(this); + } + + @Override + public String typeName() { + return "prefix"; + } + + @Override + public String toString() { + return super.toString() + ",prefixChars=" + minChars + ":" + maxChars; + } + + @Override + public Query existsQuery(QueryShardContext context) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + PrefixFieldType that = (PrefixFieldType) o; + return minChars == that.minChars && + maxChars == that.maxChars; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), minChars, maxChars); + } + } + + static final class PrefixFieldMapper extends FieldMapper { + + PrefixFieldMapper(PrefixFieldType fieldType, Settings indexSettings) { + super(fieldType.name(), fieldType, fieldType, indexSettings, MultiFields.empty(), CopyTo.empty()); + } + + @Override + public PrefixFieldType fieldType() { + return (PrefixFieldType) super.fieldType(); + } + + @Override + protected void parseCreateField(ParseContext context, List fields) { + throw new UnsupportedOperationException(); + } + + @Override + protected String contentType() { + return "prefix"; + } + + @Override + public String toString() { + return fieldType().toString(); + } + } + + static final class ShingleFieldMapper extends FieldMapper { + + ShingleFieldMapper(ShingleFieldType fieldType, Settings indexSettings) { + super(fieldType.name(), fieldType, fieldType, indexSettings, MultiFields.empty(), CopyTo.empty()); + } + + @Override + public ShingleFieldType fieldType() { + return (ShingleFieldType) super.fieldType(); + } + + @Override + protected void parseCreateField(ParseContext context, List fields) { + throw new UnsupportedOperationException(); + } + + @Override + protected String contentType() { + return "shingle"; + } + } + + /** + * The shingle field type handles phrase queries and delegates prefix and phrase prefix queries to the prefix field + */ + static class ShingleFieldType extends StringFieldType { + final int shingleSize; + PrefixFieldType prefixFieldType; + + ShingleFieldType(MappedFieldType other, int shingleSize) { + super(other); + this.shingleSize = shingleSize; + this.setStored(false); + } + + ShingleFieldType(ShingleFieldType other) { + super(other); + this.shingleSize = other.shingleSize; + if (other.prefixFieldType != null) { + this.prefixFieldType = other.prefixFieldType.clone(); + } + } + + void setPrefixFieldType(PrefixFieldType prefixFieldType) { + checkIfFrozen(); + this.prefixFieldType = prefixFieldType; + } + + @Override + public ShingleFieldType clone() { + return new ShingleFieldType(this); + } + + @Override + public String typeName() { + return CONTENT_TYPE; + } + + @Override + public Query existsQuery(QueryShardContext context) { + if (omitNorms()) { + return new TermQuery(new Term(FieldNamesFieldMapper.NAME, name())); + } else { + return new NormsFieldExistsQuery(name()); + } + } + + @Override + public Query prefixQuery(String value, MultiTermQuery.RewriteMethod method, QueryShardContext context) { + if (prefixFieldType == null || 
prefixFieldType.termLengthWithinBounds(value.length()) == false) { + return super.prefixQuery(value, method, context); + } else { + final Query query = prefixFieldType.prefixQuery(value, method, context); + if (method == null + || method == MultiTermQuery.CONSTANT_SCORE_REWRITE + || method == MultiTermQuery.CONSTANT_SCORE_BOOLEAN_REWRITE) { + return new ConstantScoreQuery(query); + } else { + return query; + } + } + } + + @Override + public Query phraseQuery(TokenStream stream, int slop, boolean enablePositionIncrements) throws IOException { + return TextFieldMapper.createPhraseQuery(stream, name(), slop, enablePositionIncrements); + } + + @Override + public Query multiPhraseQuery(TokenStream stream, int slop, boolean enablePositionIncrements) throws IOException { + return TextFieldMapper.createPhraseQuery(stream, name(), slop, enablePositionIncrements); + } + + @Override + public Query phrasePrefixQuery(TokenStream stream, int slop, int maxExpansions) throws IOException { + final String prefixFieldName = slop > 0 + ? null + : prefixFieldType.name(); + return TextFieldMapper.createPhrasePrefixQuery(stream, name(), slop, maxExpansions, + prefixFieldName, prefixFieldType::termLengthWithinBounds); + } + + @Override + public SpanQuery spanPrefixQuery(String value, SpanMultiTermQueryWrapper.SpanRewriteMethod method, QueryShardContext context) { + if (prefixFieldType != null && prefixFieldType.termLengthWithinBounds(value.length())) { + return new FieldMaskingSpanQuery(new SpanTermQuery(new Term(prefixFieldType.name(), indexedValueForSearch(value))), name()); + } else { + SpanMultiTermQueryWrapper spanMulti = + new SpanMultiTermQueryWrapper<>(new PrefixQuery(new Term(name(), indexedValueForSearch(value)))); + spanMulti.setRewriteMethod(method); + return spanMulti; + } + } + + @Override + public void checkCompatibility(MappedFieldType other, List conflicts) { + super.checkCompatibility(other, conflicts); + ShingleFieldType ft = (ShingleFieldType) other; + if (ft.shingleSize != this.shingleSize) { + conflicts.add("mapper [" + name() + "] has different [shingle_size] values"); + } + if (Objects.equals(this.prefixFieldType, ft.prefixFieldType) == false) { + conflicts.add("mapper [" + name() + "] has different [index_prefixes] settings"); + } + } + + @Override + public boolean equals(Object otherObject) { + if (this == otherObject) { + return true; + } + if (otherObject == null || getClass() != otherObject.getClass()) { + return false; + } + if (!super.equals(otherObject)) { + return false; + } + final ShingleFieldType other = (ShingleFieldType) otherObject; + return shingleSize == other.shingleSize + && Objects.equals(prefixFieldType, other.prefixFieldType); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), shingleSize, prefixFieldType); + } + } + + private final int maxShingleSize; + private PrefixFieldMapper prefixField; + private final ShingleFieldMapper[] shingleFields; + + public SearchAsYouTypeFieldMapper(String simpleName, + SearchAsYouTypeFieldType fieldType, + Settings indexSettings, + CopyTo copyTo, + int maxShingleSize, + PrefixFieldMapper prefixField, + ShingleFieldMapper[] shingleFields) { + super(simpleName, fieldType, Defaults.FIELD_TYPE, indexSettings, MultiFields.empty(), copyTo); + this.prefixField = prefixField; + this.shingleFields = shingleFields; + this.maxShingleSize = maxShingleSize; + } + + @Override + public FieldMapper updateFieldType(Map fullNameToFieldType) { + SearchAsYouTypeFieldMapper fieldMapper = (SearchAsYouTypeFieldMapper) 
super.updateFieldType(fullNameToFieldType); + fieldMapper.prefixField = (PrefixFieldMapper) fieldMapper.prefixField.updateFieldType(fullNameToFieldType); + for (int i = 0; i < fieldMapper.shingleFields.length; i++) { + fieldMapper.shingleFields[i] = (ShingleFieldMapper) fieldMapper.shingleFields[i].updateFieldType(fullNameToFieldType); + } + return fieldMapper; + } + + @Override + protected void parseCreateField(ParseContext context, List fields) throws IOException { + final String value = context.externalValueSet() ? context.externalValue().toString() : context.parser().textOrNull(); + if (value == null) { + return; + } + + List newFields = new ArrayList<>(); + newFields.add(new Field(fieldType().name(), value, fieldType())); + for (ShingleFieldMapper subFieldMapper : shingleFields) { + fields.add(new Field(subFieldMapper.fieldType().name(), value, subFieldMapper.fieldType())); + } + newFields.add(new Field(prefixField.fieldType().name(), value, prefixField.fieldType())); + if (fieldType().omitNorms()) { + createFieldNamesField(context, newFields); + } + fields.addAll(newFields); + } + + @Override + protected String contentType() { + return CONTENT_TYPE; + } + + @Override + protected void doMerge(Mapper mergeWith) { + super.doMerge(mergeWith); + SearchAsYouTypeFieldMapper mw = (SearchAsYouTypeFieldMapper) mergeWith; + if (mw.maxShingleSize != maxShingleSize) { + throw new IllegalArgumentException("mapper [" + name() + "] has different [max_shingle_size] setting, current [" + + this.maxShingleSize + "], merged [" + mw.maxShingleSize + "]"); + } + this.prefixField = (PrefixFieldMapper) this.prefixField.merge(mw.prefixField); + + ShingleFieldMapper[] shingleFieldMappers = new ShingleFieldMapper[mw.shingleFields.length]; + for (int i = 0; i < shingleFieldMappers.length; i++) { + this.shingleFields[i] = (ShingleFieldMapper) this.shingleFields[i].merge(mw.shingleFields[i]); + } + } + + public static String getShingleFieldName(String parentField, int shingleSize) { + return parentField + "._" + shingleSize + "gram"; + } + + @Override + public SearchAsYouTypeFieldType fieldType() { + return (SearchAsYouTypeFieldType) super.fieldType(); + } + + public int maxShingleSize() { + return maxShingleSize; + } + + public PrefixFieldMapper prefixField() { + return prefixField; + } + + public ShingleFieldMapper[] shingleFields() { + return shingleFields; + } + + @Override + protected void doXContentBody(XContentBuilder builder, boolean includeDefaults, Params params) throws IOException { + super.doXContentBody(builder, includeDefaults, params); + doXContentAnalyzers(builder, includeDefaults); + builder.field("max_shingle_size", maxShingleSize); + } + + @Override + public Iterator iterator() { + List subIterators = new ArrayList<>(); + subIterators.add(prefixField); + subIterators.addAll(Arrays.asList(shingleFields)); + @SuppressWarnings("unchecked") Iterator concat = Iterators.concat(super.iterator(), subIterators.iterator()); + return concat; + } + + /** + * An analyzer wrapper to add a shingle token filter, an edge ngram token filter or both to its wrapped analyzer. 
When adding an edge + * ngrams token filter, it also adds a {@link TrailingShingleTokenFilter} to add extra position increments at the end of the stream + * to induce the shingle token filter to create tokens at the end of the stream smaller than the shingle size + */ + static class SearchAsYouTypeAnalyzer extends AnalyzerWrapper { + + private final Analyzer delegate; + private final int shingleSize; + private final boolean indexPrefixes; + + private SearchAsYouTypeAnalyzer(Analyzer delegate, + int shingleSize, + boolean indexPrefixes) { + + super(delegate.getReuseStrategy()); + this.delegate = Objects.requireNonNull(delegate); + this.shingleSize = shingleSize; + this.indexPrefixes = indexPrefixes; + } + + static SearchAsYouTypeAnalyzer withShingle(Analyzer delegate, int shingleSize) { + return new SearchAsYouTypeAnalyzer(delegate, shingleSize, false); + } + + static SearchAsYouTypeAnalyzer withShingleAndPrefix(Analyzer delegate, int shingleSize) { + return new SearchAsYouTypeAnalyzer(delegate, shingleSize, true); + } + + @Override + protected Analyzer getWrappedAnalyzer(String fieldName) { + return delegate; + } + + @Override + protected TokenStreamComponents wrapComponents(String fieldName, TokenStreamComponents components) { + TokenStream tokenStream = components.getTokenStream(); + if (indexPrefixes) { + tokenStream = new TrailingShingleTokenFilter(tokenStream, shingleSize - 1); + } + tokenStream = new FixedShingleFilter(tokenStream, shingleSize, " ", ""); + if (indexPrefixes) { + tokenStream = new EdgeNGramTokenFilter(tokenStream, Defaults.MIN_GRAM, Defaults.MAX_GRAM, true); + } + return new TokenStreamComponents(components.getSource(), tokenStream); + } + + public int shingleSize() { + return shingleSize; + } + + public boolean indexPrefixes() { + return indexPrefixes; + } + + @Override + public String toString() { + return "<" + getClass().getCanonicalName() + " shingleSize=[" + shingleSize + "] indexPrefixes=[" + indexPrefixes + "]>"; + } + + private static class TrailingShingleTokenFilter extends TokenFilter { + + private final int extraPositionIncrements; + private final PositionIncrementAttribute positionIncrementAttribute; + + TrailingShingleTokenFilter(TokenStream input, int extraPositionIncrements) { + super(input); + this.extraPositionIncrements = extraPositionIncrements; + this.positionIncrementAttribute = addAttribute(PositionIncrementAttribute.class); + } + + @Override + public boolean incrementToken() throws IOException { + return input.incrementToken(); + } + + @Override + public void end() throws IOException { + super.end(); + positionIncrementAttribute.setPositionIncrement(extraPositionIncrements); + } + } + } +} diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SparseVectorFieldMapper.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SparseVectorFieldMapper.java index adf46d6a60d25..931e27bc1c19f 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SparseVectorFieldMapper.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/SparseVectorFieldMapper.java @@ -46,7 +46,7 @@ public class SparseVectorFieldMapper extends FieldMapper { public static final String CONTENT_TYPE = "sparse_vector"; - public static short MAX_DIMS_COUNT = 500; //maximum allowed number of dimensions + public static short MAX_DIMS_COUNT = 1024; //maximum allowed number of dimensions public static int MAX_DIMS_NUMBER = 65535; //maximum allowed dimension's number public static class Defaults { @@ 
-178,10 +178,9 @@ public void parse(ParseContext context) throws IOException { } dims[dimCount] = dim; values[dimCount] = value; - dimCount ++; - if (dimCount >= MAX_DIMS_COUNT) { + if (dimCount++ >= MAX_DIMS_COUNT) { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + - "] has exceeded the maximum allowed number of dimensions of :[" + MAX_DIMS_COUNT + "]"); + "] has exceeded the maximum allowed number of dimensions of [" + MAX_DIMS_COUNT + "]"); } } else { throw new IllegalArgumentException("Field [" + name() + "] of type [" + typeName() + diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/DenseVectorFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/DenseVectorFieldMapperTests.java index 2239c99a310f5..cf6fc99657756 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/DenseVectorFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/DenseVectorFieldMapperTests.java @@ -30,18 +30,19 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; -import org.hamcrest.Matchers; +import org.junit.Before; +import java.io.IOException; import java.util.Collection; -public class DenseVectorFieldMapperTests extends ESSingleNodeTestCase { +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.instanceOf; - @Override - protected Collection> getPlugins() { - return pluginList(MapperExtrasPlugin.class); - } +public class DenseVectorFieldMapperTests extends ESSingleNodeTestCase { + private DocumentMapper mapper; - public void testDefaults() throws Exception { + @Before + public void setUpMapper() throws Exception { IndexService indexService = createIndex("test-index"); DocumentMapperParser parser = indexService.mapperService().documentMapperParser(); String mapping = Strings.toString(XContentFactory.jsonBuilder() @@ -53,10 +54,15 @@ public void testDefaults() throws Exception { .endObject() .endObject() .endObject()); + mapper = parser.parse("_doc", new CompressedXContent(mapping)); + } - DocumentMapper mapper = parser.parse("_doc", new CompressedXContent(mapping)); - assertEquals(mapping, mapper.mappingSource().toString()); + @Override + protected Collection> getPlugins() { + return pluginList(MapperExtrasPlugin.class); + } + public void testDefaults() throws Exception { float[] expectedArray = {-12.1f, 100.7f, -4}; ParsedDocument doc1 = mapper.parse(new SourceToParse("test-index", "_doc", "1", BytesReference .bytes(XContentFactory.jsonBuilder() @@ -66,7 +72,7 @@ public void testDefaults() throws Exception { XContentType.JSON)); IndexableField[] fields = doc1.rootDoc().getFields("my-dense-vector"); assertEquals(1, fields.length); - assertThat(fields[0], Matchers.instanceOf(BinaryDocValuesField.class)); + assertThat(fields[0], instanceOf(BinaryDocValuesField.class)); // assert that after decoding the indexed value is equal to expected BytesRef vectorBR = ((BinaryDocValuesField) fields[0]).binaryValue(); @@ -78,4 +84,22 @@ public void testDefaults() throws Exception { 0.001f ); } + + public void testDimensionLimit() throws IOException { + float[] validVector = new float[DenseVectorFieldMapper.MAX_DIMS_COUNT]; + BytesReference validDoc = BytesReference.bytes( + XContentFactory.jsonBuilder().startObject() + .array("my-dense-vector", validVector) + .endObject()); + mapper.parse(new SourceToParse("test-index", "_doc", "1", 
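Editor's note: a small sketch of the bounds-check change applied in both vector mappers above: testing the pre-increment value (dim++ >= MAX) accepts exactly MAX dimensions and rejects MAX + 1, which is what the new testDimensionLimit verifies, whereas the previous increment-then-check rejected a vector of exactly MAX dimensions. The constant and loop below are illustrative stand-ins for the per-dimension parsing loop.

final class DimsLimitSketch {
    static final int MAX = 1024;

    static int parseDims(int totalDims) {
        int dim = 0;
        for (int i = 0; i < totalDims; i++) {
            if (dim++ >= MAX) {
                throw new IllegalArgumentException("has exceeded the maximum allowed number of dimensions of [" + MAX + "]");
            }
            // ... encode dimension i ...
        }
        return dim;
    }

    public static void main(String[] args) {
        System.out.println(parseDims(MAX));       // 1024, accepted
        try {
            parseDims(MAX + 1);                   // throws on the 1025th dimension
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}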
validDoc, XContentType.JSON)); + + float[] invalidVector = new float[DenseVectorFieldMapper.MAX_DIMS_COUNT + 1]; + BytesReference invalidDoc = BytesReference.bytes( + XContentFactory.jsonBuilder().startObject() + .array("my-dense-vector", invalidVector) + .endObject()); + MapperParsingException e = expectThrows(MapperParsingException.class, () -> mapper.parse( + new SourceToParse("test-index", "_doc", "1", invalidDoc, XContentType.JSON))); + assertThat(e.getDetailedMessage(), containsString("has exceeded the maximum allowed number of dimensions")); + } } diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/SearchAsYouTypeAnalyzerTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/SearchAsYouTypeAnalyzerTests.java new file mode 100644 index 0000000000000..6cf0dc83d9070 --- /dev/null +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/SearchAsYouTypeAnalyzerTests.java @@ -0,0 +1,197 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.core.SimpleAnalyzer; +import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; +import org.elasticsearch.index.mapper.SearchAsYouTypeFieldMapper.SearchAsYouTypeAnalyzer; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.function.Function; +import java.util.stream.IntStream; + +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; +import static java.util.stream.Collectors.toList; +import static org.hamcrest.Matchers.equalTo; + +public class SearchAsYouTypeAnalyzerTests extends ESTestCase { + + private static final Analyzer SIMPLE = new SimpleAnalyzer(); + + public static List analyze(SearchAsYouTypeAnalyzer analyzer, String text) throws IOException { + final List tokens = new ArrayList<>(); + try (TokenStream tokenStream = analyzer.tokenStream("field", text)) { + final CharTermAttribute charTermAttribute = tokenStream.addAttribute(CharTermAttribute.class); + tokenStream.reset(); + while (tokenStream.incrementToken()) { + tokens.add(charTermAttribute.toString()); + } + } + return tokens; + } + + private void testCase(String text, + Function analyzerFunction, + Function> expectedTokensFunction) throws IOException { + + for (int shingleSize = 2; shingleSize <= 4; shingleSize++) { + final SearchAsYouTypeAnalyzer analyzer = analyzerFunction.apply(shingleSize); + final List expectedTokens = expectedTokensFunction.apply(shingleSize); + final List actualTokens = analyze(analyzer, text); + assertThat("analyzed correctly with " + analyzer, 
actualTokens, equalTo(expectedTokens)); + } + } + + public void testSingleTermShingles() throws IOException { + testCase( + "quick", + shingleSize -> SearchAsYouTypeAnalyzer.withShingle(SIMPLE, shingleSize), + shingleSize -> emptyList() + ); + } + + public void testMultiTermShingles() throws IOException { + testCase( + "quick brown fox jump lazy", + shingleSize -> SearchAsYouTypeAnalyzer.withShingle(SIMPLE, shingleSize), + shingleSize -> { + if (shingleSize == 2) { + return asList("quick brown", "brown fox", "fox jump", "jump lazy"); + } else if (shingleSize == 3) { + return asList("quick brown fox", "brown fox jump", "fox jump lazy"); + } else if (shingleSize == 4) { + return asList("quick brown fox jump", "brown fox jump lazy"); + } + throw new IllegalArgumentException(); + } + ); + } + + public void testSingleTermPrefix() throws IOException { + testCase( + "quick", + shingleSize -> SearchAsYouTypeAnalyzer.withShingleAndPrefix(SIMPLE, shingleSize), + shingleSize -> { + final List tokens = new ArrayList<>(asList("q", "qu", "qui", "quic", "quick")); + tokens.addAll(tokenWithSpaces("quick", shingleSize)); + return tokens; + } + ); + } + + public void testMultiTermPrefix() throws IOException { + testCase( + //"quick red fox lazy brown", + "quick brown fox jump lazy", + shingleSize -> SearchAsYouTypeAnalyzer.withShingleAndPrefix(SIMPLE, shingleSize), + shingleSize -> { + if (shingleSize == 2) { + final List tokens = new ArrayList<>(); + tokens.addAll(asList( + "q", "qu", "qui", "quic", "quick", "quick ", "quick b", "quick br", "quick bro", "quick brow", "quick brown" + )); + tokens.addAll(asList( + "b", "br", "bro", "brow", "brown", "brown ", "brown f", "brown fo", "brown fox" + )); + tokens.addAll(asList( + "f", "fo", "fox", "fox ", "fox j", "fox ju", "fox jum", "fox jump" + )); + tokens.addAll(asList( + "j", "ju", "jum", "jump", "jump ", "jump l", "jump la", "jump laz", "jump lazy" + )); + tokens.addAll(asList( + "l", "la", "laz", "lazy" + )); + tokens.addAll(tokenWithSpaces("lazy", shingleSize)); + return tokens; + } else if (shingleSize == 3) { + final List tokens = new ArrayList<>(); + tokens.addAll(asList( + "q", "qu", "qui", "quic", "quick", "quick ", "quick b", "quick br", "quick bro", "quick brow", "quick brown", + "quick brown ", "quick brown f", "quick brown fo", "quick brown fox" + )); + tokens.addAll(asList( + "b", "br", "bro", "brow", "brown", "brown ", "brown f", "brown fo", "brown fox", "brown fox ", "brown fox j", + "brown fox ju", "brown fox jum", "brown fox jump" + )); + tokens.addAll(asList( + "f", "fo", "fox", "fox ", "fox j", "fox ju", "fox jum", "fox jump", "fox jump ", "fox jump l", "fox jump la", + "fox jump laz", "fox jump lazy" + )); + tokens.addAll(asList( + "j", "ju", "jum", "jump", "jump ", "jump l", "jump la", "jump laz", "jump lazy" + )); + tokens.addAll(tokenWithSpaces("jump lazy", shingleSize - 1)); + tokens.addAll(asList( + "l", "la", "laz", "lazy" + )); + tokens.addAll(tokenWithSpaces("lazy", shingleSize)); + return tokens; + } else if (shingleSize == 4) { + final List tokens = new ArrayList<>(); + tokens.addAll(asList( + "q", "qu", "qui", "quic", "quick", "quick ", "quick b", "quick br", "quick bro", "quick brow", "quick brown", + "quick brown ", "quick brown f", "quick brown fo", "quick brown fox", "quick brown fox ", "quick brown fox j", + "quick brown fox ju", "quick brown fox jum", "quick brown fox jump" + )); + tokens.addAll(asList( + "b", "br", "bro", "brow", "brown", "brown ", "brown f", "brown fo", "brown fox", "brown fox ", "brown fox j", + "brown 
fox ju", "brown fox jum", "brown fox jump", "brown fox jump ", "brown fox jump l", "brown fox jump la", + "brown fox jump laz", "brown fox jump lazy" + )); + tokens.addAll(asList( + "f", "fo", "fox", "fox ", "fox j", "fox ju", "fox jum", "fox jump", "fox jump ", "fox jump l", "fox jump la", + "fox jump laz", "fox jump lazy" + )); + tokens.addAll(tokenWithSpaces("fox jump lazy", shingleSize - 2)); + tokens.addAll(asList( + "j", "ju", "jum", "jump", "jump ", "jump l", "jump la", "jump laz", "jump lazy" + )); + tokens.addAll(tokenWithSpaces("jump lazy", shingleSize - 1)); + tokens.addAll(asList( + "l", "la", "laz", "lazy" + )); + tokens.addAll(tokenWithSpaces("lazy", shingleSize)); + return tokens; + } + + throw new IllegalArgumentException(); + } + ); + } + + private static List tokenWithSpaces(String text, int maxShingleSize) { + return IntStream.range(1, maxShingleSize).mapToObj(i -> text + spaces(i)).collect(toList()); + } + + private static String spaces(int count) { + final StringBuilder builder = new StringBuilder(); + for (int i = 0; i < count; i++) { + builder.append(" "); + } + return builder.toString(); + } +} diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/SearchAsYouTypeFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/SearchAsYouTypeFieldMapperTests.java new file mode 100644 index 0000000000000..4622b34ea1514 --- /dev/null +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/SearchAsYouTypeFieldMapperTests.java @@ -0,0 +1,813 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.index.mapper; + +import org.apache.lucene.document.FieldType; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.ConstantScoreQuery; +import org.apache.lucene.search.DisjunctionMaxQuery; +import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.MultiPhraseQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.SynonymQuery; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.spans.FieldMaskingSpanQuery; +import org.apache.lucene.search.spans.SpanNearQuery; +import org.apache.lucene.search.spans.SpanTermQuery; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.analysis.NamedAnalyzer; +import org.elasticsearch.index.mapper.SearchAsYouTypeFieldMapper.PrefixFieldMapper; +import org.elasticsearch.index.mapper.SearchAsYouTypeFieldMapper.PrefixFieldType; +import org.elasticsearch.index.mapper.SearchAsYouTypeFieldMapper.SearchAsYouTypeAnalyzer; +import org.elasticsearch.index.mapper.SearchAsYouTypeFieldMapper.SearchAsYouTypeFieldType; +import org.elasticsearch.index.mapper.SearchAsYouTypeFieldMapper.ShingleFieldMapper; +import org.elasticsearch.index.mapper.SearchAsYouTypeFieldMapper.ShingleFieldType; +import org.elasticsearch.index.query.MatchPhrasePrefixQueryBuilder; +import org.elasticsearch.index.query.MatchPhraseQueryBuilder; +import org.elasticsearch.index.query.MultiMatchQueryBuilder; +import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESSingleNodeTestCase; +import org.hamcrest.Matcher; +import org.hamcrest.Matchers; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static java.util.Arrays.asList; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasProperty; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.collection.IsArrayContainingInAnyOrder.arrayContainingInAnyOrder; +import static org.hamcrest.core.IsInstanceOf.instanceOf; + +public class SearchAsYouTypeFieldMapperTests extends ESSingleNodeTestCase { + + @Override + protected Collection> getPlugins() { + return pluginList(MapperExtrasPlugin.class); + } + + public void testIndexing() throws IOException { + final String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("a_field") + .field("type", "search_as_you_type") + .endObject() + 
.endObject() + .endObject() + .endObject()); + + final DocumentMapper mapper = createIndex("test") + .mapperService() + .documentMapperParser() + .parse("_doc", new CompressedXContent(mapping)); + ParsedDocument doc = mapper.parse(new SourceToParse("test", "_doc", "1", BytesReference + .bytes(XContentFactory.jsonBuilder() + .startObject() + .field("a_field", "new york city") + .endObject()), + XContentType.JSON)); + + for (String field : new String[] { "a_field", "a_field._index_prefix", "a_field._2gram", "a_field._3gram"}) { + IndexableField[] fields = doc.rootDoc().getFields(field); + assertEquals(1, fields.length); + assertEquals("new york city", fields[0].stringValue()); + } + } + + public void testDefaultConfiguration() throws IOException { + final String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("a_field") + .field("type", "search_as_you_type") + .endObject() + .endObject() + .endObject() + .endObject()); + + final DocumentMapper defaultMapper = createIndex("test") + .mapperService() + .documentMapperParser() + .parse("_doc", new CompressedXContent(mapping)); + + final SearchAsYouTypeFieldMapper rootMapper = getRootFieldMapper(defaultMapper, "a_field"); + assertRootFieldMapper(rootMapper, 3, "default"); + + + final PrefixFieldMapper prefixFieldMapper = getPrefixFieldMapper(defaultMapper, "a_field._index_prefix"); + assertPrefixFieldType(prefixFieldMapper.fieldType(), 3, "default"); + + assertShingleFieldType( + getShingleFieldMapper(defaultMapper, "a_field._2gram").fieldType(), 2, "default", prefixFieldMapper.fieldType()); + assertShingleFieldType( + getShingleFieldMapper(defaultMapper, "a_field._3gram").fieldType(), 3, "default", prefixFieldMapper.fieldType()); + } + + public void testConfiguration() throws IOException { + final int maxShingleSize = 4; + final String analyzerName = "simple"; + + final String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("a_field") + .field("type", "search_as_you_type") + .field("analyzer", analyzerName) + .field("max_shingle_size", maxShingleSize) + .endObject() + .endObject() + .endObject() + .endObject()); + + final DocumentMapper defaultMapper = createIndex("test") + .mapperService() + .documentMapperParser() + .parse("_doc", new CompressedXContent(mapping)); + + final SearchAsYouTypeFieldMapper rootMapper = getRootFieldMapper(defaultMapper, "a_field"); + assertRootFieldMapper(rootMapper, maxShingleSize, analyzerName); + + final PrefixFieldMapper prefixFieldMapper = getPrefixFieldMapper(defaultMapper, "a_field._index_prefix"); + assertPrefixFieldType(prefixFieldMapper.fieldType(), maxShingleSize, analyzerName); + + assertShingleFieldType( + getShingleFieldMapper(defaultMapper, "a_field._2gram").fieldType(), 2, analyzerName, prefixFieldMapper.fieldType()); + assertShingleFieldType( + getShingleFieldMapper(defaultMapper, "a_field._3gram").fieldType(), 3, analyzerName, prefixFieldMapper.fieldType()); + assertShingleFieldType( + getShingleFieldMapper(defaultMapper, "a_field._4gram").fieldType(), 4, analyzerName, prefixFieldMapper.fieldType()); + } + + public void testSimpleMerge() throws IOException { + MapperService mapperService = createIndex("test").mapperService(); + { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() + .startObject("_doc") + .startObject("properties") + .startObject("a_field") + .field("type", 
"search_as_you_type") + .field("analyzer", "standard") + .endObject() + .endObject() + .endObject().endObject()); + DocumentMapper mapper = mapperService.merge("_doc", + new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); + } + + { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() + .startObject("_doc") + .startObject("properties") + .startObject("a_field") + .field("type", "search_as_you_type") + .field("analyzer", "standard") + .endObject() + .startObject("b_field") + .field("type", "text") + .endObject() + .endObject() + .endObject().endObject()); + DocumentMapper mapper = mapperService.merge("_doc", + new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); + } + + { + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject() + .startObject("_doc") + .startObject("properties") + .startObject("a_field") + .field("type", "search_as_you_type") + .field("analyzer", "standard") + .field("max_shingle_size", "4") + .endObject() + .startObject("b_field") + .field("type", "text") + .endObject() + .endObject() + .endObject().endObject()); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> mapperService.merge("_doc", + new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE)); + assertThat(e.getMessage(), containsString("different [max_shingle_size]")); + } + } + + public void testIndexOptions() throws IOException { + final String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("a_field") + .field("type", "search_as_you_type") + .field("index_options", "offsets") + .endObject() + .endObject() + .endObject() + .endObject()); + + final DocumentMapper defaultMapper = createIndex("test") + .mapperService() + .documentMapperParser() + .parse("_doc", new CompressedXContent(mapping)); + + Stream.of( + getRootFieldMapper(defaultMapper, "a_field"), + getPrefixFieldMapper(defaultMapper, "a_field._index_prefix"), + getShingleFieldMapper(defaultMapper, "a_field._2gram"), + getShingleFieldMapper(defaultMapper, "a_field._3gram") + ).forEach(mapper -> assertThat("for " + mapper.name(), + mapper.fieldType().indexOptions(), equalTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS))); + } + + public void testStore() throws IOException { + final String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("a_field") + .field("type", "search_as_you_type") + .field("store", "true") + .endObject() + .endObject() + .endObject() + .endObject()); + + final DocumentMapper defaultMapper = createIndex("test") + .mapperService() + .documentMapperParser() + .parse("_doc", new CompressedXContent(mapping)); + + assertTrue(getRootFieldMapper(defaultMapper, "a_field").fieldType().stored()); + Stream.of( + getPrefixFieldMapper(defaultMapper, "a_field._index_prefix"), + getShingleFieldMapper(defaultMapper, "a_field._2gram"), + getShingleFieldMapper(defaultMapper, "a_field._3gram") + ).forEach(mapper -> assertFalse("for " + mapper.name(), mapper.fieldType().stored())); + } + + public void testIndex() throws IOException { + final String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("a_field") + .field("type", "search_as_you_type") + .field("index", "false") + .endObject() + .endObject() + .endObject() + .endObject()); 
+ + final DocumentMapper defaultMapper = createIndex("test") + .mapperService() + .documentMapperParser() + .parse("_doc", new CompressedXContent(mapping)); + + Stream.of( + getRootFieldMapper(defaultMapper, "a_field"), + getPrefixFieldMapper(defaultMapper, "a_field._index_prefix"), + getShingleFieldMapper(defaultMapper, "a_field._2gram"), + getShingleFieldMapper(defaultMapper, "a_field._3gram") + ).forEach(mapper -> assertThat("for " + mapper.name(), mapper.fieldType().indexOptions(), equalTo(IndexOptions.NONE))); + } + + public void testTermVectors() throws IOException { + final String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("a_field") + .field("type", "search_as_you_type") + .field("term_vector", "yes") + .endObject() + .endObject() + .endObject() + .endObject()); + + final DocumentMapper defaultMapper = createIndex("test") + .mapperService() + .documentMapperParser() + .parse("_doc", new CompressedXContent(mapping)); + + Stream.of( + getRootFieldMapper(defaultMapper, "a_field"), + getShingleFieldMapper(defaultMapper, "a_field._2gram"), + getShingleFieldMapper(defaultMapper, "a_field._3gram") + ).forEach(mapper -> assertTrue("for " + mapper.name(), mapper.fieldType().storeTermVectors())); + + final PrefixFieldMapper prefixFieldMapper = getPrefixFieldMapper(defaultMapper, "a_field._index_prefix"); + assertFalse(prefixFieldMapper.fieldType().storeTermVectors()); + } + + public void testNorms() throws IOException { + // default setting + { + final String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("a_field") + .field("type", "search_as_you_type") + .endObject() + .endObject() + .endObject() + .endObject()); + + final DocumentMapper defaultMapper = createIndex("test-1") + .mapperService() + .documentMapperParser() + .parse("_doc", new CompressedXContent(mapping)); + + Stream.of( + getRootFieldMapper(defaultMapper, "a_field"), + getShingleFieldMapper(defaultMapper, "a_field._2gram"), + getShingleFieldMapper(defaultMapper, "a_field._3gram") + ).forEach(mapper -> assertFalse("for " + mapper.name(), mapper.fieldType().omitNorms())); + + final PrefixFieldMapper prefixFieldMapper = getPrefixFieldMapper(defaultMapper, "a_field._index_prefix"); + assertTrue(prefixFieldMapper.fieldType().omitNorms()); + } + + // can disable them on shingle fields + { + final String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("a_field") + .field("type", "search_as_you_type") + .field("norms", "false") + .endObject() + .endObject() + .endObject() + .endObject()); + + final DocumentMapper defaultMapper = createIndex("test-2") + .mapperService() + .documentMapperParser() + .parse("_doc", new CompressedXContent(mapping)); + + Stream.of( + getRootFieldMapper(defaultMapper, "a_field"), + getPrefixFieldMapper(defaultMapper, "a_field._index_prefix"), + getShingleFieldMapper(defaultMapper, "a_field._2gram"), + getShingleFieldMapper(defaultMapper, "a_field._3gram") + ).forEach(mapper -> assertTrue("for " + mapper.name(), mapper.fieldType().omitNorms())); + } + } + + + public void testDocumentParsingSingleValue() throws IOException { + documentParsingTestCase(Collections.singleton(randomAlphaOfLengthBetween(5, 20))); + } + + public void testDocumentParsingMultipleValues() throws IOException { + documentParsingTestCase(randomUnique(() -> 
randomAlphaOfLengthBetween(3, 20), randomIntBetween(2, 10))); + } + + public void testMatchPhrasePrefix() throws IOException { + IndexService indexService = createIndex("test", Settings.EMPTY); + QueryShardContext queryShardContext = indexService.newQueryShardContext( + randomInt(20), null, () -> { + throw new UnsupportedOperationException(); + }, null); + + String mapping = Strings.toString(XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties") + .startObject("field") + .field("type", "search_as_you_type") + .endObject() + .endObject() + .endObject().endObject()); + + queryShardContext.getMapperService().merge("type", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); + + { + Query q = new MatchPhrasePrefixQueryBuilder("field", "two words").toQuery(queryShardContext); + Query expected = new SynonymQuery(new Term("field._index_prefix", "two words")); + assertThat(q, equalTo(expected)); + } + + { + Query q = new MatchPhrasePrefixQueryBuilder("field", "three words here").toQuery(queryShardContext); + Query expected = new SynonymQuery(new Term("field._index_prefix", "three words here")); + assertThat(q, equalTo(expected)); + } + + { + Query q = new MatchPhrasePrefixQueryBuilder("field", "two words").slop(1).toQuery(queryShardContext); + MultiPhrasePrefixQuery mpq = new MultiPhrasePrefixQuery("field"); + mpq.setSlop(1); + mpq.add(new Term("field", "two")); + mpq.add(new Term("field", "words")); + assertThat(q, equalTo(mpq)); + } + + { + Query q = new MatchPhrasePrefixQueryBuilder("field", "more than three words").toQuery(queryShardContext); + Query expected = new SpanNearQuery.Builder("field._3gram", true) + .addClause(new SpanTermQuery(new Term("field._3gram", "more than three"))) + .addClause(new FieldMaskingSpanQuery( + new SpanTermQuery(new Term("field._index_prefix", "than three words")), "field._3gram") + ) + .build(); + assertThat(q, equalTo(expected)); + } + + { + Query q = new MatchPhrasePrefixQueryBuilder("field._3gram", "more than three words").toQuery(queryShardContext); + Query expected = new SpanNearQuery.Builder("field._3gram", true) + .addClause(new SpanTermQuery(new Term("field._3gram", "more than three"))) + .addClause(new FieldMaskingSpanQuery( + new SpanTermQuery(new Term("field._index_prefix", "than three words")), "field._3gram") + ) + .build(); + assertThat(q, equalTo(expected)); + } + + { + Query q = new MatchPhrasePrefixQueryBuilder("field._3gram", "two words").toQuery(queryShardContext); + Query expected = new MatchNoDocsQuery(); + assertThat(q, equalTo(expected)); + } + + { + Query actual = new MatchPhrasePrefixQueryBuilder("field._3gram", "one two three four") + .slop(1) + .toQuery(queryShardContext); + MultiPhrasePrefixQuery expected = new MultiPhrasePrefixQuery("field._3gram"); + expected.setSlop(1); + expected.add(new Term("field._3gram", "one two three")); + expected.add(new Term("field._3gram", "two three four")); + assertThat(actual, equalTo(expected)); + } + + } + + public void testMatchPhrase() throws IOException { + final IndexService indexService = createIndex("test", Settings.EMPTY); + final QueryShardContext queryShardContext = indexService.newQueryShardContext(randomInt(20), null, + () -> { throw new UnsupportedOperationException(); }, null); + final String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("a_field") + .field("type", "search_as_you_type") + .endObject() + .endObject() + .endObject() + 
.endObject()); + + queryShardContext.getMapperService().merge("_doc", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); + + { + final Query actual = new MatchPhraseQueryBuilder("a_field", "one") + .toQuery(queryShardContext); + final Query expected = new TermQuery(new Term("a_field", "one")); + assertThat(actual, equalTo(expected)); + } + + { + final Query actual = new MatchPhraseQueryBuilder("a_field", "one two") + .toQuery(queryShardContext); + final Query expected = new MultiPhraseQuery.Builder() + .add(new Term("a_field._2gram", "one two")) + .build(); + assertThat(actual, equalTo(expected)); + } + + { + final Query actual = new MatchPhraseQueryBuilder("a_field", "one two three") + .toQuery(queryShardContext); + final Query expected = new MultiPhraseQuery.Builder() + .add(new Term("a_field._3gram", "one two three")) + .build(); + assertThat(actual, equalTo(expected)); + } + + { + final Query actual = new MatchPhraseQueryBuilder("a_field", "one two three four") + .toQuery(queryShardContext); + final Query expected = new MultiPhraseQuery.Builder() + .add(new Term("a_field._3gram", "one two three")) + .add(new Term("a_field._3gram", "two three four")) + .build(); + assertThat(actual, equalTo(expected)); + } + + { + final Query actual = new MatchPhraseQueryBuilder("a_field", "one two") + .slop(1) + .toQuery(queryShardContext); + final Query expected = new MultiPhraseQuery.Builder() + .add(new Term("a_field", "one")) + .add(new Term("a_field", "two")) + .setSlop(1) + .build(); + assertThat(actual, equalTo(expected)); + } + + { + final Query actual = new MatchPhraseQueryBuilder("a_field._2gram", "one two") + .toQuery(queryShardContext); + final Query expected = new TermQuery(new Term("a_field._2gram", "one two")); + assertThat(actual, equalTo(expected)); + } + + { + final Query actual = new MatchPhraseQueryBuilder("a_field._2gram", "one two three") + .toQuery(queryShardContext); + final Query expected = new MultiPhraseQuery.Builder() + .add(new Term("a_field._2gram", "one two")) + .add(new Term("a_field._2gram", "two three")) + .build(); + assertThat(actual, equalTo(expected)); + } + + { + final Query actual = new MatchPhraseQueryBuilder("a_field._3gram", "one two three") + .toQuery(queryShardContext); + final Query expected = new TermQuery(new Term("a_field._3gram", "one two three")); + assertThat(actual, equalTo(expected)); + } + + { + final Query actual = new MatchPhraseQueryBuilder("a_field._3gram", "one two three four") + .toQuery(queryShardContext); + final Query expected = new MultiPhraseQuery.Builder() + .add(new Term("a_field._3gram", "one two three")) + .add(new Term("a_field._3gram", "two three four")) + .build(); + assertThat(actual, equalTo(expected)); + } + + // todo are these queries generated for the prefix field right? 
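+ // note (re: the todo above): judging by the assertions that follow, the prefix subfield's search analyzer appears to emit only full
+ // max_shingle_size shingles, so a shorter phrase analyzes to no tokens and matches nothing, a phrase of exactly that size becomes a
+ // single term query, and a longer phrase spans multiple positions and is rejected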
+ { + final Query actual = new MatchPhraseQueryBuilder("a_field._index_prefix", "one two") + .toQuery(queryShardContext); + final Query expected = new MatchNoDocsQuery("Matching no documents because no terms present"); + assertThat(actual, equalTo(expected)); + } + + { + final Query actual = new MatchPhraseQueryBuilder("a_field._index_prefix", "one two three") + .toQuery(queryShardContext); + final Query expected = new TermQuery(new Term("a_field._index_prefix", "one two three")); + assertThat(actual, equalTo(expected)); + } + + { + expectThrows(IllegalArgumentException.class, + () -> new MatchPhraseQueryBuilder("a_field._index_prefix", "one two three four").toQuery(queryShardContext)); + } + } + + private static BooleanQuery buildBoolPrefixQuery(String shingleFieldName, String prefixFieldName, List terms) { + final BooleanQuery.Builder builder = new BooleanQuery.Builder(); + for (int i = 0; i < terms.size() - 1; i++) { + final String term = terms.get(i); + builder.add(new BooleanClause(new TermQuery(new Term(shingleFieldName, term)), BooleanClause.Occur.SHOULD)); + } + final String finalTerm = terms.get(terms.size() - 1); + builder.add(new BooleanClause( + new ConstantScoreQuery(new TermQuery(new Term(prefixFieldName, finalTerm))), BooleanClause.Occur.SHOULD)); + return builder.build(); + } + + public void testMultiMatchBoolPrefix() throws IOException { + final IndexService indexService = createIndex("test", Settings.EMPTY); + final QueryShardContext queryShardContext = indexService.newQueryShardContext(randomInt(20), null, + () -> { throw new UnsupportedOperationException(); }, null); + final String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("a_field") + .field("type", "search_as_you_type") + .field("max_shingle_size", 4) + .endObject() + .endObject() + .endObject() + .endObject()); + + queryShardContext.getMapperService().merge("_doc", new CompressedXContent(mapping), MapperService.MergeReason.MAPPING_UPDATE); + + final MultiMatchQueryBuilder builder = new MultiMatchQueryBuilder( + "quick brown fox jump lazy dog", + "a_field", + "a_field._2gram", + "a_field._3gram", + "a_field._4gram" + ); + builder.type(MultiMatchQueryBuilder.Type.BOOL_PREFIX); + + final Query actual = builder.toQuery(queryShardContext); + assertThat(actual, instanceOf(DisjunctionMaxQuery.class)); + final DisjunctionMaxQuery disMaxQuery = (DisjunctionMaxQuery) actual; + assertThat(disMaxQuery.getDisjuncts(), hasSize(4)); + assertThat(disMaxQuery.getDisjuncts(), containsInAnyOrder( + buildBoolPrefixQuery( + "a_field", "a_field._index_prefix", asList("quick", "brown", "fox", "jump", "lazy", "dog")), + buildBoolPrefixQuery("a_field._2gram", "a_field._index_prefix", + asList("quick brown", "brown fox", "fox jump", "jump lazy", "lazy dog")), + buildBoolPrefixQuery("a_field._3gram", "a_field._index_prefix", + asList("quick brown fox", "brown fox jump", "fox jump lazy", "jump lazy dog")), + buildBoolPrefixQuery("a_field._4gram", "a_field._index_prefix", + asList("quick brown fox jump", "brown fox jump lazy", "fox jump lazy dog")))); + } + + private void documentParsingTestCase(Collection values) throws IOException { + final String mapping = Strings.toString(XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("a_field") + .field("type", "search_as_you_type") + .endObject() + .endObject() + .endObject() + .endObject()); + + final DocumentMapper defaultMapper = 
createIndex("test") + .mapperService() + .documentMapperParser() + .parse("_doc", new CompressedXContent(mapping)); + + final XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + if (values.size() > 1) { + builder.array("a_field", values.toArray(new String[0])); + } else { + builder.field("a_field", values.iterator().next()); + } + builder.endObject(); + final ParsedDocument parsedDocument = defaultMapper.parse( + new SourceToParse("test", "_doc", "1", BytesReference.bytes(builder), XContentType.JSON)); + + + final Set> rootFieldMatchers = values.stream() + .map(value -> indexableFieldMatcher(value, SearchAsYouTypeFieldType.class)) + .collect(Collectors.toSet()); + final Set> shingleFieldMatchers = values.stream() + .map(value -> indexableFieldMatcher(value, ShingleFieldType.class)) + .collect(Collectors.toSet()); + final Set> prefixFieldMatchers = values.stream() + .map(value -> indexableFieldMatcher(value, PrefixFieldType.class)) + .collect(Collectors.toSet()); + + // the use of new ArrayList<>() here is to avoid the varargs form of arrayContainingInAnyOrder + assertThat( + parsedDocument.rootDoc().getFields("a_field"), + arrayContainingInAnyOrder(new ArrayList<>(rootFieldMatchers))); + + assertThat( + parsedDocument.rootDoc().getFields("a_field._index_prefix"), + arrayContainingInAnyOrder(new ArrayList<>(prefixFieldMatchers))); + + for (String name : asList("a_field._2gram", "a_field._3gram")) { + assertThat(parsedDocument.rootDoc().getFields(name), arrayContainingInAnyOrder(new ArrayList<>(shingleFieldMatchers))); + } + } + + private static Matcher indexableFieldMatcher(String value, Class fieldTypeClass) { + return Matchers.allOf( + hasProperty(IndexableField::stringValue, equalTo(value)), + hasProperty(IndexableField::fieldType, instanceOf(fieldTypeClass)) + ); + } + + private static void assertRootFieldMapper(SearchAsYouTypeFieldMapper mapper, + int maxShingleSize, + String analyzerName) { + + assertThat(mapper.maxShingleSize(), equalTo(maxShingleSize)); + assertThat(mapper.fieldType(), notNullValue()); + assertSearchAsYouTypeFieldType(mapper.fieldType(), maxShingleSize, analyzerName, mapper.prefixField().fieldType()); + + assertThat(mapper.prefixField(), notNullValue()); + assertThat(mapper.prefixField().fieldType().parentField, equalTo(mapper.name())); + assertPrefixFieldType(mapper.prefixField().fieldType(), maxShingleSize, analyzerName); + + + for (int shingleSize = 2; shingleSize <= maxShingleSize; shingleSize++) { + final ShingleFieldMapper shingleFieldMapper = mapper.shingleFields()[shingleSize - 2]; + assertThat(shingleFieldMapper, notNullValue()); + assertShingleFieldType(shingleFieldMapper.fieldType(), shingleSize, analyzerName, mapper.prefixField().fieldType()); + } + + final int numberOfShingleSubfields = (maxShingleSize - 2) + 1; + assertThat(mapper.shingleFields().length, equalTo(numberOfShingleSubfields)); + } + + private static void assertSearchAsYouTypeFieldType(SearchAsYouTypeFieldType fieldType, int maxShingleSize, + String analyzerName, + PrefixFieldType prefixFieldType) { + + assertThat(fieldType.shingleFields.length, equalTo(maxShingleSize-1)); + for (NamedAnalyzer analyzer : asList(fieldType.indexAnalyzer(), fieldType.searchAnalyzer())) { + assertThat(analyzer.name(), equalTo(analyzerName)); + } + int shingleSize = 2; + for (ShingleFieldType shingleField : fieldType.shingleFields) { + assertShingleFieldType(shingleField, shingleSize++, analyzerName, prefixFieldType); + } + + assertThat(fieldType.prefixField, 
equalTo(prefixFieldType)); + } + + private static void assertShingleFieldType(ShingleFieldType fieldType, + int shingleSize, + String analyzerName, + PrefixFieldType prefixFieldType) { + + assertThat(fieldType.shingleSize, equalTo(shingleSize)); + + for (NamedAnalyzer analyzer : asList(fieldType.indexAnalyzer(), fieldType.searchAnalyzer())) { + assertThat(analyzer.name(), equalTo(analyzerName)); + if (shingleSize > 1) { + final SearchAsYouTypeAnalyzer wrappedAnalyzer = (SearchAsYouTypeAnalyzer) analyzer.analyzer(); + assertThat(wrappedAnalyzer.shingleSize(), equalTo(shingleSize)); + assertThat(wrappedAnalyzer.indexPrefixes(), equalTo(false)); + } + } + + assertThat(fieldType.prefixFieldType, equalTo(prefixFieldType)); + + } + + private static void assertPrefixFieldType(PrefixFieldType fieldType, int shingleSize, String analyzerName) { + for (NamedAnalyzer analyzer : asList(fieldType.indexAnalyzer(), fieldType.searchAnalyzer())) { + assertThat(analyzer.name(), equalTo(analyzerName)); + } + + final SearchAsYouTypeAnalyzer wrappedIndexAnalyzer = (SearchAsYouTypeAnalyzer) fieldType.indexAnalyzer().analyzer(); + final SearchAsYouTypeAnalyzer wrappedSearchAnalyzer = (SearchAsYouTypeAnalyzer) fieldType.searchAnalyzer().analyzer(); + for (SearchAsYouTypeAnalyzer analyzer : asList(wrappedIndexAnalyzer, wrappedSearchAnalyzer)) { + assertThat(analyzer.shingleSize(), equalTo(shingleSize)); + } + assertThat(wrappedIndexAnalyzer.indexPrefixes(), equalTo(true)); + assertThat(wrappedSearchAnalyzer.indexPrefixes(), equalTo(false)); + } + + private static SearchAsYouTypeFieldMapper getRootFieldMapper(DocumentMapper defaultMapper, String fieldName) { + final Mapper mapper = defaultMapper.mappers().getMapper(fieldName); + assertThat(mapper, instanceOf(SearchAsYouTypeFieldMapper.class)); + return (SearchAsYouTypeFieldMapper) mapper; + } + + private static ShingleFieldMapper getShingleFieldMapper(DocumentMapper defaultMapper, String fieldName) { + final Mapper mapper = defaultMapper.mappers().getMapper(fieldName); + assertThat(mapper, instanceOf(ShingleFieldMapper.class)); + return (ShingleFieldMapper) mapper; + } + + private static PrefixFieldMapper getPrefixFieldMapper(DocumentMapper defaultMapper, String fieldName) { + final Mapper mapper = defaultMapper.mappers().getMapper(fieldName); + assertThat(mapper, instanceOf(PrefixFieldMapper.class)); + return (PrefixFieldMapper) mapper; + } +} diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/SearchAsYouTypeFieldTypeTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/SearchAsYouTypeFieldTypeTests.java new file mode 100644 index 0000000000000..523de91809145 --- /dev/null +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/SearchAsYouTypeFieldTypeTests.java @@ -0,0 +1,113 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.mapper; + +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.ConstantScoreQuery; +import org.apache.lucene.search.PrefixQuery; +import org.apache.lucene.search.TermInSetQuery; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.index.mapper.SearchAsYouTypeFieldMapper.Defaults; +import org.elasticsearch.index.mapper.SearchAsYouTypeFieldMapper.PrefixFieldType; +import org.elasticsearch.index.mapper.SearchAsYouTypeFieldMapper.SearchAsYouTypeFieldType; +import org.elasticsearch.index.mapper.SearchAsYouTypeFieldMapper.ShingleFieldType; +import org.junit.Before; + +import static java.util.Arrays.asList; +import static org.apache.lucene.search.MultiTermQuery.CONSTANT_SCORE_REWRITE; +import static org.hamcrest.Matchers.equalTo; + +public class SearchAsYouTypeFieldTypeTests extends FieldTypeTestCase { + + private static final String NAME = "a_field"; + private static final String PREFIX_NAME = NAME + "._index_prefix"; + + @Before + public void setupProperties() { + addModifier(new Modifier("max_shingle_size", false) { + @Override + public void modify(MappedFieldType ft) { + SearchAsYouTypeFieldType fieldType = (SearchAsYouTypeFieldType) ft; + fieldType.setShingleFields(new ShingleFieldType[] { + new ShingleFieldType(fieldType, 2), + new ShingleFieldType(fieldType, 3) + }); + } + }); + addModifier(new Modifier("index_prefixes", false) { + @Override + public void modify(MappedFieldType ft) { + SearchAsYouTypeFieldType fieldType = (SearchAsYouTypeFieldType) ft; + fieldType.setPrefixField(new PrefixFieldType(NAME, PREFIX_NAME, 1, 10)); + } + }); + } + + @Override + protected SearchAsYouTypeFieldType createDefaultFieldType() { + final SearchAsYouTypeFieldType fieldType = new SearchAsYouTypeFieldType(); + fieldType.setName(NAME); + fieldType.setPrefixField(new PrefixFieldType(NAME, PREFIX_NAME, Defaults.MIN_GRAM, Defaults.MAX_GRAM)); + fieldType.setShingleFields(new ShingleFieldType[] { new ShingleFieldType(fieldType, 2) }); + return fieldType; + } + + public void testTermQuery() { + final MappedFieldType fieldType = createDefaultFieldType(); + + fieldType.setIndexOptions(IndexOptions.DOCS); + assertThat(fieldType.termQuery("foo", null), equalTo(new TermQuery(new Term(NAME, "foo")))); + + fieldType.setIndexOptions(IndexOptions.NONE); + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> fieldType.termQuery("foo", null)); + assertThat(e.getMessage(), equalTo("Cannot search on field [" + NAME + "] since it is not indexed.")); + } + + public void testTermsQuery() { + final MappedFieldType fieldType = createDefaultFieldType(); + + fieldType.setIndexOptions(IndexOptions.DOCS); + assertThat(fieldType.termsQuery(asList("foo", "bar"), null), + equalTo(new TermInSetQuery(NAME, asList(new BytesRef("foo"), new BytesRef("bar"))))); + + fieldType.setIndexOptions(IndexOptions.NONE); + final IllegalArgumentException e = + expectThrows(IllegalArgumentException.class, () -> fieldType.termsQuery(asList("foo", "bar"), null)); + assertThat(e.getMessage(), equalTo("Cannot search on field [" + NAME + "] since it is not indexed.")); + } + + public void testPrefixQuery() { + final SearchAsYouTypeFieldType fieldType = createDefaultFieldType(); + + // this term should be a length that can be rewriteable to a term query on the 
prefix field + final String withinBoundsTerm = "foo"; + assertThat(fieldType.prefixQuery(withinBoundsTerm, CONSTANT_SCORE_REWRITE, null), + equalTo(new ConstantScoreQuery(new TermQuery(new Term(PREFIX_NAME, withinBoundsTerm))))); + + // our defaults don't allow a situation where a term can be too small + + // this term should be too long to be rewriteable to a term query on the prefix field + final String longTerm = "toolongforourprefixfieldthistermis"; + assertThat(fieldType.prefixQuery(longTerm, CONSTANT_SCORE_REWRITE, null), + equalTo(new PrefixQuery(new Term(NAME, longTerm)))); + } +} diff --git a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/SparseVectorFieldMapperTests.java b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/SparseVectorFieldMapperTests.java index 06710e39592cc..754a6f1a31803 100644 --- a/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/SparseVectorFieldMapperTests.java +++ b/modules/mapper-extras/src/test/java/org/elasticsearch/index/mapper/SparseVectorFieldMapperTests.java @@ -33,7 +33,12 @@ import org.hamcrest.Matchers; import org.junit.Before; +import java.io.IOException; import java.util.Collection; +import java.util.Map; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.IntStream; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.core.IsInstanceOf.instanceOf; @@ -42,7 +47,7 @@ public class SparseVectorFieldMapperTests extends ESSingleNodeTestCase { private DocumentMapper mapper; @Before - public void setup() throws Exception { + public void setUpMapper() throws Exception { IndexService indexService = createIndex("test-index"); DocumentMapperParser parser = indexService.mapperService().documentMapperParser(); String mapping = Strings.toString(XContentFactory.jsonBuilder() @@ -100,7 +105,7 @@ public void testDefaults() throws Exception { ); } - public void testErrors() { + public void testDimensionNumberValidation() { // 1. 
test for an error on negative dimension MapperParsingException e = expectThrows(MapperParsingException.class, () -> { mapper.parse(new SourceToParse("test-index", "_doc", "1", BytesReference @@ -161,4 +166,28 @@ public void testErrors() { assertThat(e.getCause().getMessage(), containsString( "takes an object that maps a dimension number to a float, but got unexpected token [START_ARRAY]")); } + + public void testDimensionLimit() throws IOException { + Map validVector = IntStream.range(0, SparseVectorFieldMapper.MAX_DIMS_COUNT) + .boxed() + .collect(Collectors.toMap(String::valueOf, Function.identity())); + + BytesReference validDoc = BytesReference.bytes( + XContentFactory.jsonBuilder().startObject() + .field("my-sparse-vector", validVector) + .endObject()); + mapper.parse(new SourceToParse("test-index", "_doc", "1", validDoc, XContentType.JSON)); + + Map invalidVector = IntStream.range(0, SparseVectorFieldMapper.MAX_DIMS_COUNT + 1) + .boxed() + .collect(Collectors.toMap(String::valueOf, Function.identity())); + + BytesReference invalidDoc = BytesReference.bytes( + XContentFactory.jsonBuilder().startObject() + .field("my-sparse-vector", invalidVector) + .endObject()); + MapperParsingException e = expectThrows(MapperParsingException.class, () -> mapper.parse( + new SourceToParse("test-index", "_doc", "1", invalidDoc, XContentType.JSON))); + assertThat(e.getDetailedMessage(), containsString("has exceeded the maximum allowed number of dimensions")); + } } diff --git a/modules/mapper-extras/src/test/resources/rest-api-spec/test/search-as-you-type/10_basic.yml b/modules/mapper-extras/src/test/resources/rest-api-spec/test/search-as-you-type/10_basic.yml new file mode 100644 index 0000000000000..3ddcd89347fcb --- /dev/null +++ b/modules/mapper-extras/src/test/resources/rest-api-spec/test/search-as-you-type/10_basic.yml @@ -0,0 +1,1249 @@ +setup: + - skip: + version: " - 7.0.99" + reason: "added in 7.1.0" + + - do: + indices.create: + index: test + body: + settings: + number_of_replicas: 0 + mappings: + properties: + a_field: + type: search_as_you_type + analyzer: simple + max_shingle_size: 4 + + - do: + index: + index: test + type: _doc + id: 1 + body: + a_field: "quick brown fox jump lazy dog" + + # this document should not be matched + - do: + index: + index: test + type: _doc + id: 2 + body: + a_field: "xylophone xylophone xylophone" + + - do: + indices.refresh: {} + +--- +"get document": + - do: + get: + index: test + type: _doc + id: 1 + + - is_true: found + - match: { _source.a_field: "quick brown fox jump lazy dog" } + +--- +"term query on root field": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + a_field: "quick" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + + +# these "search on Xgram" tests repeat the same search for each term we expect to generate +--- +"term query on 2gram": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + a_field._2gram: "quick brown" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + a_field._2gram: "brown fox" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + a_field._2gram: "fox jump" + + - match: { hits.total: 1 } + - 
match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + a_field._2gram: "jump lazy" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + a_field._2gram: "lazy dog" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"term query on 3gram": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + a_field._3gram: "quick brown fox" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + a_field._3gram: "brown fox jump" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + a_field._3gram: "fox jump lazy" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + a_field._3gram: "jump lazy dog" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"term query on 4gram": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + a_field._4gram: "quick brown fox jump" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + a_field._4gram: "brown fox jump lazy" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + a_field._4gram: "fox jump lazy dog" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +# we won't check all the terms that this field generates because there are many +--- +"term query on prefix field with prefix term": + + # search term as prefix + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + a_field._index_prefix: "quick br" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"term query on prefix field with infix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + a_field._index_prefix: "jump la" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"term query on prefix field with trailing term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + term: + a_field._index_prefix: "do" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on root field with prefix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field: "quic" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on root field with infix term": + + - do: + search: + 
rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field: "brown fo" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on 2gram with prefix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field._2gram: "quic" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on 2gram with infix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field._2gram: "brown fo" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on 3gram with prefix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field._3gram: "quic" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on 3gram with infix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field._3gram: "brown fo" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on 4gram with prefix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field._4gram: "quic" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on 4gram with infix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field._4gram: "brown fo" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on root field with 1 prefix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field: "quic" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on root field with 2 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field: "quick b" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on root field with 3 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field: "quick brown fo" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on root field with 4 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field: "quick brown fox ju" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on root field with 1 infix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field: "fo" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on root field with 2 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field: "fox jum" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on 
root field with 3 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field: "fox jump la" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on root field with 4 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field: "fox jump lazy do" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"prefix query on root field with trailing term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + prefix: + a_field: "do" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase query on root field with 1 prefix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase: + a_field: "quick" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase query on root field with 2 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase: + a_field: "quick brown" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase query on root field with 3 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase: + a_field: "quick brown fox" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase query on root field with 4 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase: + a_field: "quick brown fox jump" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase query on root field with 5 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase: + a_field: "quick brown fox jump lazy" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase query on root field with 1 infix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase: + a_field: "brown" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase query on root field with 2 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase: + a_field: "brown fox" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase query on root field with 3 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase: + a_field: "brown fox jump" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase query on root field with 4 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase: + a_field: "brown fox jump lazy" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase query on root field with 5 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: 
+ query: + match_phrase: + a_field: "brown fox jump lazy dog" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase query on root field with trailing term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase: + a_field: "dog" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase prefix query on root field with 1 prefix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase_prefix: + a_field: "qui" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase prefix query on root field with 2 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase_prefix: + a_field: "quick b" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase prefix query on root field with 3 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase_prefix: + a_field: "quick brown f" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase prefix query on root field with 4 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase_prefix: + a_field: "quick brown fox ju" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase prefix query on root field with 5 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase_prefix: + a_field: "quick brown fox jump la" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase prefix query on root field with 1 infix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase_prefix: + a_field: "br" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase prefix query on root field with 2 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase_prefix: + a_field: "brown f" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase prefix query on root field with 3 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase_prefix: + a_field: "brown fox ju" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase prefix query on root field with 4 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase_prefix: + a_field: "brown fox jump la" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase prefix query on root field with 5 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase_prefix: + a_field: "brown fox jump lazy d" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"phrase prefix query on root field with trailing term": + + - do: + 
search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase_prefix: + a_field: "do" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"bool prefix query on root field with 1 prefix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + a_field: "qui" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"bool prefix query on root field with 2 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + a_field: "quick b" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"bool prefix query on root field with 3 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + a_field: "quick brown f" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"bool prefix query on root field with 4 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + a_field: "quick brown fox ju" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"bool prefix query on root field with 5 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + a_field: "quick brown fox jump la" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"bool prefix query on root field with 1 infix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + a_field: "br" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"bool prefix query on root field with 2 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + a_field: "brown f" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"bool prefix query on root field with 3 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + a_field: "brown fox j" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"bool prefix query on root field with 4 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + a_field: "brown fox jump la" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"bool prefix query on root field with 5 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + a_field: "brown fox jump lazy d" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"bool prefix query on root field with trailing term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + a_field: "do" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"bool prefix query on root field out of order partial 
trailing term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + a_field: "fox jump brown do" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"bool prefix query on root field out of order partial leading term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + a_field: "fox jump brown qui" + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"multi match bool prefix query with 1 prefix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "qui" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"multi match bool prefix query with 2 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "quick br" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"multi match bool prefix query with 3 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "quick brown f" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"multi match bool prefix query with 4 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "quick brown fox ju" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"multi match bool prefix query with 5 prefix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "quick brown fox jump la" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"multi match bool prefix query with 1 infix term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "br" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"multi match bool prefix query with 2 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown f" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"multi match bool prefix query with 3 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox ju" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", 
"a_field._4gram" ] + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"multi match bool prefix query with 4 infix terms": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox jump la" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"multi match bool prefix query with trailing term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "do" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"multi match bool prefix query out of order with partial trailing term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "fox jump brown do" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + +--- +"multi match bool prefix query out of order with partial leading term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "fox jump lazy qui" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } diff --git a/modules/mapper-extras/src/test/resources/rest-api-spec/test/search-as-you-type/20_highlighting.yml b/modules/mapper-extras/src/test/resources/rest-api-spec/test/search-as-you-type/20_highlighting.yml new file mode 100644 index 0000000000000..82a599ce686c2 --- /dev/null +++ b/modules/mapper-extras/src/test/resources/rest-api-spec/test/search-as-you-type/20_highlighting.yml @@ -0,0 +1,202 @@ +setup: + - skip: + version: " - 7.0.99" + reason: "added in 7.1.0" + + - do: + indices.create: + index: test + body: + settings: + number_of_replicas: 0 + mappings: + properties: + a_field: + type: search_as_you_type + analyzer: simple + max_shingle_size: 4 + text_field: + type: text + analyzer: simple + + - do: + index: + index: test + type: _doc + id: 1 + body: + a_field: "quick brown fox jump lazy dog" + text_field: "quick brown fox jump lazy dog" + + - do: + indices.refresh: {} + +--- +"phrase query": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_phrase: + a_field: "brown" + highlight: + fields: + a_field: + type: unified + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0._source.text_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0.highlight.a_field.0: "quick brown fox jump lazy dog" } + +--- +"bool prefix query": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + a_field: "brown fo" + highlight: + fields: + a_field: + type: unified + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0._source.text_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0.highlight.a_field.0: "quick brown fox jump lazy dog" } + +--- 
+"multi match bool prefix query 1 complete term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fo" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + highlight: + fields: + a_field: + type: unified + a_field._2gram: + type: unified + a_field._3gram: + type: unified + a_field._4gram: + type: unified + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0._source.text_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0.highlight.a_field: ["quick brown fox jump lazy dog"] } + - match: { hits.hits.0.highlight.a_field\._2gram: null } + - match: { hits.hits.0.highlight.a_field\._3gram: null } + - match: { hits.hits.0.highlight.a_field\._4gram: null } + +--- +"multi match bool prefix query 2 complete term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox ju" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + highlight: + fields: + a_field: + type: unified + a_field._2gram: + type: unified + a_field._3gram: + type: unified + a_field._4gram: + type: unified + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0._source.text_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0.highlight.a_field: ["quick brown fox jump lazy dog"] } + - match: { hits.hits.0.highlight.a_field\._2gram: ["quick brown fox jump lazy dog"] } + - match: { hits.hits.0.highlight.a_field\._3gram: null } + - match: { hits.hits.0.highlight.a_field\._4gram: null } + +--- +"multi match bool prefix query 3 complete term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox jump la" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + highlight: + fields: + a_field: + type: unified + a_field._2gram: + type: unified + a_field._3gram: + type: unified + a_field._4gram: + type: unified + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0._source.text_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0.highlight.a_field: ["quick brown fox jump lazy dog"] } + - match: { hits.hits.0.highlight.a_field\._2gram: ["quick brown fox jump lazy dog"] } + - match: { hits.hits.0.highlight.a_field\._3gram: ["quick brown fox jump lazy dog"] } + - match: { hits.hits.0.highlight.a_field\._4gram: null } + +--- +"multi match bool prefix query 4 complete term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox jump lazy d" + type: "bool_prefix" + fields: [ "a_field", "a_field._2gram", "a_field._3gram", "a_field._4gram" ] + highlight: + fields: + a_field: + type: unified + a_field._2gram: + type: unified + a_field._3gram: + type: unified + a_field._4gram: + type: unified + + - match: { hits.total: 1 } + - match: { hits.hits.0._source.a_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0._source.text_field: "quick brown fox jump lazy dog" } + - match: { hits.hits.0.highlight.a_field: ["quick brown fox jump lazy dog"] } + - match: { hits.hits.0.highlight.a_field\._2gram: ["quick brown fox jump lazy dog"] } + - match: { hits.hits.0.highlight.a_field\._3gram: ["quick 
brown fox jump lazy dog"] } + - match: { hits.hits.0.highlight.a_field\._4gram: ["quick brown fox jump lazy dog"] } diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java index 92b2180f4da6c..bdedc65b7a6d3 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/AsyncBulkByScrollActionTests.java @@ -276,7 +276,7 @@ public void testBulkResponseSetsLotsOfStatus() { versionConflicts++; responses[i] = new BulkItemResponse(i, randomFrom(DocWriteRequest.OpType.values()), new Failure(shardId.getIndexName(), "type", "id" + i, - new VersionConflictEngineException(shardId, "type", "id", "test"))); + new VersionConflictEngineException(shardId, "id", "test"))); continue; } boolean createdResponse; diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java index c077c992beb60..917d196b6e9fb 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/ReindexFailureTests.java @@ -81,7 +81,7 @@ public void testAbortOnVersionConflict() throws Exception { BulkByScrollResponse response = copy.get(); assertThat(response, matcher().batches(1).versionConflicts(1).failures(1).created(99)); for (Failure failure: response.getBulkFailures()) { - assertThat(failure.getMessage(), containsString("VersionConflictEngineException[[_doc][")); + assertThat(failure.getMessage(), containsString("VersionConflictEngineException[[")); } } diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/10_basic.yml b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/10_basic.yml index dd29e7701ba1c..d11f160bcf571 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/10_basic.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/delete_by_query/10_basic.yml @@ -129,7 +129,7 @@ - match: {failures.0.status: 409} - match: {failures.0.cause.type: version_conflict_engine_exception} # Use a regex so we don't mind if the current version isn't always 1. Sometimes it comes out 2. 
- - match: {failures.0.cause.reason: "/\\[_doc\\]\\[1\\]:.version.conflict,.current.version.\\[\\d+\\].is.different.than.the.one.provided.\\[\\d+\\]/"} + - match: {failures.0.cause.reason: "/\\[1\\]:.version.conflict,.current.version.\\[\\d+\\].is.different.than.the.one.provided.\\[\\d+\\]/"} - match: {failures.0.cause.shard: /\d+/} - match: {failures.0.cause.index: test} - gte: { took: 0 } @@ -185,7 +185,7 @@ - match: {failures.0.id: "1"} - match: {failures.0.status: 409} - match: {failures.0.cause.type: version_conflict_engine_exception} - - match: {failures.0.cause.reason: "/\\[_doc\\]\\[1\\]:.version.conflict,.required.seqNo.\\[\\d+\\]/"} + - match: {failures.0.cause.reason: "/\\[1\\]:.version.conflict,.required.seqNo.\\[\\d+\\]/"} - match: {failures.0.cause.shard: /\d+/} - match: {failures.0.cause.index: test} - gte: { took: 0 } diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yml b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yml index 312a88ace5e92..9ef6c1a90c400 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/reindex/10_basic.yml @@ -160,7 +160,7 @@ - match: {failures.0.status: 409} - match: {failures.0.cause.type: version_conflict_engine_exception} # Use a regex so we don't mind if the version isn't always 1. Sometimes it comes out 2. - - match: {failures.0.cause.reason: "/\\[_doc\\]\\[1\\]:.version.conflict,.document.already.exists.\\(current.version.\\[\\d+\\]\\)/"} + - match: {failures.0.cause.reason: "/\\[1\\]:.version.conflict,.document.already.exists.\\(current.version.\\[\\d+\\]\\)/"} - match: {failures.0.cause.shard: /\d+/} - match: {failures.0.cause.index: dest} - gte: { took: 0 } diff --git a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yml b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yml index 15bc62214ebfb..08c8465c40960 100644 --- a/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yml +++ b/modules/reindex/src/test/resources/rest-api-spec/test/update_by_query/10_basic.yml @@ -109,7 +109,7 @@ - match: {failures.0.status: 409} - match: {failures.0.cause.type: version_conflict_engine_exception} # Use a regex so we don't mind if the current version isn't always 1. Sometimes it comes out 2. 
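The update_by_query assertions just below follow the same adjustment as the delete_by_query and reindex ones above: with mapping types removed from the failure message, the expected reason no longer carries a [_doc] prefix and starts directly with the document id. A small, self-contained illustration of the string shape the updated regexes accept (the literal reason here is an example, not captured from a real response):

    import java.util.regex.Pattern;

    public class VersionConflictReasonShape {
        // Mirrors the updated YAML regex: "[<id>]: version conflict, current version [<n>]
        // is different than the one provided [<m>]" -- no "[_doc]" type prefix in front.
        private static final Pattern REASON = Pattern.compile(
            "\\[\\w+\\]: version conflict, current version \\[\\d+\\] is different than the one provided \\[\\d+\\]");

        public static void main(String[] args) {
            String example = "[1]: version conflict, current version [2] is different than the one provided [1]";
            System.out.println(REASON.matcher(example).find()); // prints true
        }
    }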
- - match: {failures.0.cause.reason: "/\\[_doc\\]\\[1\\]:.version.conflict,.current.version.\\[\\d+\\].is.different.than.the.one.provided.\\[\\d+\\]/"} + - match: {failures.0.cause.reason: "/\\[1\\]:.version.conflict,.current.version.\\[\\d+\\].is.different.than.the.one.provided.\\[\\d+\\]/"} - match: {failures.0.cause.shard: /\d+/} - match: {failures.0.cause.index: test} - gte: { took: 0 } @@ -151,7 +151,7 @@ - match: {failures.0.id: "1"} - match: {failures.0.status: 409} - match: {failures.0.cause.type: version_conflict_engine_exception} - - match: {failures.0.cause.reason: "/\\[_doc\\]\\[1\\]:.version.conflict,.required.seqNo.\\[\\d+\\]/"} + - match: {failures.0.cause.reason: "/\\[1\\]:.version.conflict,.required.seqNo.\\[\\d+\\]/"} - match: {failures.0.cause.shard: /\d+/} - match: {failures.0.cause.index: test} - gte: { took: 0 } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java index 73135c2a14560..69d84dfb78faf 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java @@ -20,12 +20,11 @@ package org.elasticsearch.http.netty4; import io.netty.channel.Channel; -import io.netty.channel.ChannelPromise; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.concurrent.CompletableContext; import org.elasticsearch.http.HttpChannel; import org.elasticsearch.http.HttpResponse; +import org.elasticsearch.transport.netty4.Netty4TcpChannel; import java.net.InetSocketAddress; @@ -36,38 +35,12 @@ public class Netty4HttpChannel implements HttpChannel { Netty4HttpChannel(Channel channel) { this.channel = channel; - this.channel.closeFuture().addListener(f -> { - if (f.isSuccess()) { - closeContext.complete(null); - } else { - Throwable cause = f.cause(); - if (cause instanceof Error) { - ExceptionsHelper.maybeDieOnAnotherThread(cause); - closeContext.completeExceptionally(new Exception(cause)); - } else { - closeContext.completeExceptionally((Exception) cause); - } - } - }); + Netty4TcpChannel.addListener(this.channel.closeFuture(), closeContext); } @Override public void sendResponse(HttpResponse response, ActionListener listener) { - ChannelPromise writePromise = channel.newPromise(); - writePromise.addListener(f -> { - if (f.isSuccess()) { - listener.onResponse(null); - } else { - final Throwable cause = f.cause(); - ExceptionsHelper.maybeDieOnAnotherThread(cause); - if (cause instanceof Error) { - listener.onFailure(new Exception(cause)); - } else { - listener.onFailure((Exception) cause); - } - } - }); - channel.writeAndFlush(response, writePromise); + channel.writeAndFlush(response, Netty4TcpChannel.addPromise(listener, channel)); } @Override diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java index 472e34d09fc40..cad95d2627083 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java @@ -38,7 +38,7 @@ class Netty4HttpRequestHandler extends SimpleChannelInboundHandler msg) throws Exception { + protected void 
channelRead0(ChannelHandlerContext ctx, HttpPipelinedRequest msg) { Netty4HttpChannel channel = ctx.channel().attr(Netty4HttpServerTransport.HTTP_CHANNEL_KEY).get(); FullHttpRequest request = msg.getRequest(); @@ -72,7 +72,7 @@ protected void channelRead0(ChannelHandlerContext ctx, HttpPipelinedRequest { - if (f.isSuccess()) { - closeContext.complete(null); - } else { - Throwable cause = f.cause(); - if (cause instanceof Error) { - ExceptionsHelper.maybeDieOnAnotherThread(cause); - closeContext.completeExceptionally(new Exception(cause)); - } else { - closeContext.completeExceptionally((Exception) cause); - } - } - }); + Netty4TcpChannel.addListener(this.channel.closeFuture(), closeContext); } @Override diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java index 38527151695d8..4eca1803b6381 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4InternalESLogger.java @@ -22,7 +22,6 @@ import io.netty.util.internal.logging.AbstractInternalLogger; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.LogManager; import org.elasticsearch.common.SuppressLoggerChecks; @SuppressLoggerChecks(reason = "safely delegates to logger") diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpChannel.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpChannel.java index ef96f75be89ca..4c68466efc4d6 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpChannel.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpChannel.java @@ -46,33 +46,54 @@ public class Netty4TcpChannel implements TcpChannel { this.isServer = isServer; this.profile = profile; this.connectContext = new CompletableContext<>(); - this.channel.closeFuture().addListener(f -> { + addListener(this.channel.closeFuture(), closeContext); + addListener(connectFuture, connectContext); + } + + /** + * Adds a listener that completes the given {@link CompletableContext} to the given {@link ChannelFuture}. + * @param channelFuture Channel future + * @param context Context to complete + */ + public static void addListener(ChannelFuture channelFuture, CompletableContext context) { + channelFuture.addListener(f -> { if (f.isSuccess()) { - closeContext.complete(null); + context.complete(null); } else { Throwable cause = f.cause(); if (cause instanceof Error) { ExceptionsHelper.maybeDieOnAnotherThread(cause); - closeContext.completeExceptionally(new Exception(cause)); + context.completeExceptionally(new Exception(cause)); } else { - closeContext.completeExceptionally((Exception) cause); + context.completeExceptionally((Exception) cause); } } }); + } - connectFuture.addListener(f -> { + /** + * Creates a {@link ChannelPromise} for the given {@link Channel} and adds a listener that invokes the given {@link ActionListener} + * on its completion. 
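Both helpers documented here are consumed elsewhere in this same patch, which keeps the Error-vs-Exception handling around Netty futures in a single place. Their call sites, as they appear in the HTTP and TCP channel classes after this change:

    // complete a CompletableContext once the channel's close future resolves
    Netty4TcpChannel.addListener(this.channel.closeFuture(), closeContext);

    // wrap an ActionListener in a ChannelPromise for a write
    channel.writeAndFlush(response, Netty4TcpChannel.addPromise(listener, channel));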
+ * @param listener lister to invoke + * @param channel channel + * @return write promise + */ + public static ChannelPromise addPromise(ActionListener listener, Channel channel) { + ChannelPromise writePromise = channel.newPromise(); + writePromise.addListener(f -> { if (f.isSuccess()) { - connectContext.complete(null); + listener.onResponse(null); } else { - Throwable cause = f.cause(); + final Throwable cause = f.cause(); + ExceptionsHelper.maybeDieOnAnotherThread(cause); if (cause instanceof Error) { - ExceptionsHelper.maybeDieOnAnotherThread(cause); - connectContext.completeExceptionally(new Exception(cause)); + listener.onFailure(new Exception(cause)); } else { - connectContext.completeExceptionally((Exception) cause); + listener.onFailure((Exception) cause); } } }); + return writePromise; } @Override @@ -122,21 +143,7 @@ public InetSocketAddress getRemoteAddress() { @Override public void sendMessage(BytesReference reference, ActionListener listener) { - ChannelPromise writePromise = channel.newPromise(); - writePromise.addListener(f -> { - if (f.isSuccess()) { - listener.onResponse(null); - } else { - final Throwable cause = f.cause(); - ExceptionsHelper.maybeDieOnAnotherThread(cause); - if (cause instanceof Error) { - listener.onFailure(new Exception(cause)); - } else { - listener.onFailure((Exception) cause); - } - } - }); - channel.writeAndFlush(Netty4Utils.toByteBuf(reference), writePromise); + channel.writeAndFlush(Netty4Utils.toByteBuf(reference), addPromise(listener, channel)); if (channel.eventLoop().isShutdown()) { listener.onFailure(new TransportException("Cannot send message, event loop is shutting down.")); diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpServerChannel.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpServerChannel.java index 9ef3f296f0601..830b0a8c203a4 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpServerChannel.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4TcpServerChannel.java @@ -20,7 +20,6 @@ package org.elasticsearch.transport.netty4; import io.netty.channel.Channel; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.concurrent.CompletableContext; import org.elasticsearch.transport.TcpServerChannel; @@ -36,19 +35,7 @@ public class Netty4TcpServerChannel implements TcpServerChannel { Netty4TcpServerChannel(Channel channel, String profile) { this.channel = channel; this.profile = profile; - this.channel.closeFuture().addListener(f -> { - if (f.isSuccess()) { - closeContext.complete(null); - } else { - Throwable cause = f.cause(); - if (cause instanceof Error) { - ExceptionsHelper.maybeDieOnAnotherThread(cause); - closeContext.completeExceptionally(new Exception(cause)); - } else { - closeContext.completeExceptionally((Exception) cause); - } - } - }); + Netty4TcpChannel.addListener(this.channel.closeFuture(), closeContext); } @Override diff --git a/plugins/build.gradle b/plugins/build.gradle index 5b7d5f5faf26f..585f26c3780f8 100644 --- a/plugins/build.gradle +++ b/plugins/build.gradle @@ -21,7 +21,7 @@ configure(subprojects.findAll { it.parent.path == project.path }) { group = 'org.elasticsearch.plugin' // TODO exclude some plugins as they require features not yet supproted by testclusters - if (false == name in ['repository-azure', 'repository-hdfs', 'repository-s3']) { + if (false == name in 
['repository-hdfs']) { apply plugin: 'elasticsearch.testclusters' } diff --git a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java index 9777174563626..7b195bdc7b434 100644 --- a/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java +++ b/plugins/mapper-annotated-text/src/main/java/org/elasticsearch/index/mapper/annotatedtext/AnnotatedTextFieldMapper.java @@ -591,7 +591,7 @@ public Query multiPhraseQuery(TokenStream stream, int slop, boolean enablePositi @Override public Query phrasePrefixQuery(TokenStream stream, int slop, int maxExpansions) throws IOException { - return TextFieldMapper.createPhrasePrefixQuery(stream, name(), slop, maxExpansions); + return TextFieldMapper.createPhrasePrefixQuery(stream, name(), slop, maxExpansions, null, null); } } diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 27597e94976fa..a7c1af412d949 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -65,7 +65,9 @@ check { dependsOn 'qa:microsoft-azure-storage:check' } -integTestCluster { - keystoreSetting 'azure.client.integration_test.account', 'azure_account' - keystoreSetting 'azure.client.integration_test.key', 'azure_key' +testClusters { + integTest { + keystore 'azure.client.integration_test.account', 'azure_account' + keystore 'azure.client.integration_test.key', 'azure_key' + } } diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 34323fb930fce..946b377491d26 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -24,18 +24,19 @@ import org.elasticsearch.gradle.test.RestIntegTestTask import java.nio.file.Files import java.nio.file.Path import java.nio.file.Paths - +apply plugin: 'elasticsearch.test.fixtures' + esplugin { description 'The HDFS repository plugin adds support for Hadoop Distributed File-System (HDFS) repositories.' 
classname 'org.elasticsearch.repositories.hdfs.HdfsPlugin' } -apply plugin: 'elasticsearch.vagrantsupport' - versions << [ 'hadoop2': '2.8.1' ] +testFixtures.useFixture ":test:fixtures:krb5kdc-fixture" + configurations { hdfsFixture } @@ -68,67 +69,27 @@ dependencyLicenses { mapping from: /hadoop-.*/, to: 'hadoop' } -// MIT Kerberos Vagrant Testing Fixture -String box = "krb5kdc" -Map vagrantEnvVars = [ - 'VAGRANT_CWD' : "${project(':test:fixtures:krb5kdc-fixture').projectDir}", - 'VAGRANT_VAGRANTFILE' : 'Vagrantfile', - 'VAGRANT_PROJECT_DIR' : "${project(':test:fixtures:krb5kdc-fixture').projectDir}" -] - -task krb5kdcUpdate(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'box' - subcommand 'update' - boxName box - environmentVars vagrantEnvVars - dependsOn "vagrantCheckVersion", "virtualboxCheckVersion" -} - -task krb5kdcFixture(type: org.elasticsearch.gradle.test.VagrantFixture) { - command 'up' - args '--provision', '--provider', 'virtualbox' - boxName box - environmentVars vagrantEnvVars - dependsOn krb5kdcUpdate -} - -task krb5AddPrincipals { - dependsOn krb5kdcFixture -} -List principals = [ "elasticsearch", "hdfs/hdfs.build.elastic.co" ] String realm = "BUILD.ELASTIC.CO" -for (String principal : principals) { - Task create = project.tasks.create("addPrincipal#${principal}".replace('/', '_'), org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'ssh' - args '--command', "sudo bash /vagrant/src/main/resources/provision/addprinc.sh $principal" - boxName box - environmentVars vagrantEnvVars - dependsOn krb5kdcFixture - } - krb5AddPrincipals.dependsOn(create) -} // Create HDFS File System Testing Fixtures for HA/Secure combinations for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture', 'secureHaHdfsFixture']) { project.tasks.create(fixtureName, org.elasticsearch.gradle.test.AntFixture) { - dependsOn project.configurations.hdfsFixture + dependsOn project.configurations.hdfsFixture, project(':test:fixtures:krb5kdc-fixture').tasks.postProcessFixture executable = new File(project.runtimeJavaHome, 'bin/java') env 'CLASSPATH', "${ -> project.configurations.hdfsFixture.asPath }" waitCondition = { fixture, ant -> // the hdfs.MiniHDFS fixture writes the ports file when // it's ready, so we can just wait for the file to exist return fixture.portsFile.exists() - } + } final List miniHDFSArgs = [] // If it's a secure fixture, then depend on Kerberos Fixture and principals + add the krb5conf to the JVM options if (fixtureName.equals('secureHdfsFixture') || fixtureName.equals('secureHaHdfsFixture')) { - dependsOn krb5kdcFixture, krb5AddPrincipals - Path krb5Config = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("conf").resolve("krb5.conf") - miniHDFSArgs.add("-Djava.security.krb5.conf=${krb5Config}"); + miniHDFSArgs.add("-Djava.security.krb5.conf=${project(':test:fixtures:krb5kdc-fixture').ext.krb5Conf("hdfs")}"); if (project.runtimeJavaVersion == JavaVersion.VERSION_1_9) { miniHDFSArgs.add('--add-opens=java.security.jgss/sun.security.krb5=ALL-UNNAMED') } @@ -145,9 +106,11 @@ for (String fixtureName : ['hdfsFixture', 'haHdfsFixture', 'secureHdfsFixture', // If it's a secure fixture, then set the principal name and keytab locations to use for auth. 
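// Note: after this change the secure fixtures no longer depend on the Vagrant-managed KDC tasks;
// the krb5.conf and keytab locations are resolved from the shared ':test:fixtures:krb5kdc-fixture'
// project, for example:
//   project(':test:fixtures:krb5kdc-fixture').ext.krb5Conf("hdfs")
//   project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs", "hdfs_hdfs.build.elastic.co.keytab")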
if (fixtureName.equals('secureHdfsFixture') || fixtureName.equals('secureHaHdfsFixture')) { - Path keytabPath = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("keytabs").resolve("hdfs_hdfs.build.elastic.co.keytab") miniHDFSArgs.add("hdfs/hdfs.build.elastic.co@${realm}") - miniHDFSArgs.add("${keytabPath}") + miniHDFSArgs.add( + project(':test:fixtures:krb5kdc-fixture') + .ext.krb5Keytabs("hdfs", "hdfs_hdfs.build.elastic.co.keytab") + ) } args miniHDFSArgs.toArray() @@ -170,10 +133,11 @@ project.afterEvaluate { // If it's a secure cluster, add the keytab as an extra config, and set the krb5 conf in the JVM options. if (integTestTaskName.equals('integTestSecure') || integTestTaskName.equals('integTestSecureHa')) { - Path elasticsearchKT = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("keytabs").resolve("elasticsearch.keytab").toAbsolutePath() - Path krb5conf = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("conf").resolve("krb5.conf").toAbsolutePath() - - restIntegTestTask.clusterConfig.extraConfigFile("repository-hdfs/krb5.keytab", "${elasticsearchKT}") + String krb5conf = project(':test:fixtures:krb5kdc-fixture').ext.krb5Conf("hdfs") + restIntegTestTask.clusterConfig.extraConfigFile( + "repository-hdfs/krb5.keytab", + "${project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs", "elasticsearch.keytab")}" + ) jvmArgs = jvmArgs + " " + "-Djava.security.krb5.conf=${krb5conf}" if (project.runtimeJavaVersion == JavaVersion.VERSION_1_9) { jvmArgs = jvmArgs + " " + '--add-opens=java.security.jgss/sun.security.krb5=ALL-UNNAMED' @@ -189,9 +153,10 @@ project.afterEvaluate { if (project.runtimeJavaVersion == JavaVersion.VERSION_1_9) { restIntegTestTaskRunner.jvmArg '--add-opens=java.security.jgss/sun.security.krb5=ALL-UNNAMED' } - - Path hdfsKT = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("keytabs").resolve("hdfs_hdfs.build.elastic.co.keytab").toAbsolutePath() - restIntegTestTaskRunner.systemProperty "test.krb5.keytab.hdfs", "${hdfsKT}" + restIntegTestTaskRunner.systemProperty ( + "test.krb5.keytab.hdfs", + project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs","hdfs_hdfs.build.elastic.co.keytab") + ) } } @@ -269,41 +234,25 @@ if (fixtureSupported) { integTestHa.setEnabled(false) } -// Secure HDFS testing relies on the Vagrant based Kerberos fixture. -boolean secureFixtureSupported = false -if (fixtureSupported) { - secureFixtureSupported = project.rootProject.vagrantSupported -} - -if (secureFixtureSupported) { - project.check.dependsOn(integTestSecure) - project.check.dependsOn(integTestSecureHa) +check.dependsOn(integTestSecure, integTestSecureHa) - // Fixture dependencies - integTestSecureCluster.dependsOn secureHdfsFixture, krb5kdcFixture - integTestSecureHaCluster.dependsOn secureHaHdfsFixture, krb5kdcFixture +// Fixture dependencies +integTestSecureCluster.dependsOn secureHdfsFixture +integTestSecureHaCluster.dependsOn secureHaHdfsFixture - // Set the keytab files in the classpath so that we can access them from test code without the security manager - // freaking out. - Path hdfsKeytabPath = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("keytabs") - project.dependencies { - testRuntime fileTree(dir: hdfsKeytabPath.toString(), include: ['*.keytab']) - } - - // Run just the secure hdfs rest test suite. - integTestSecureRunner.systemProperty 'tests.rest.suite', 'secure_hdfs_repository' - // Ignore HA integration Tests. 
They are included below as part of integTestSecureHa test runner. - integTestSecureRunner.exclude('**/Ha*TestSuiteIT.class') - - // Only include the HA integration tests for the HA test task - integTestSecureHaRunner.patternSet.setIncludes(['**/Ha*TestSuiteIT.class']) -} else { - // Security tests unsupported. Don't run these tests. - integTestSecure.enabled = false - integTestSecureHa.enabled = false - testingConventions.enabled = false +// Set the keytab files in the classpath so that we can access them from test code without the security manager +// freaking out. +project.dependencies { + testRuntime fileTree(dir: project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("hdfs","hdfs_hdfs.build.elastic.co.keytab").parent, include: ['*.keytab']) } +// Run just the secure hdfs rest test suite. +integTestSecureRunner.systemProperty 'tests.rest.suite', 'secure_hdfs_repository' +// Ignore HA integration Tests. They are included below as part of integTestSecureHa test runner. +integTestSecureRunner.exclude('**/Ha*TestSuiteIT.class') +// Only include the HA integration tests for the HA test task +integTestSecureHaRunner.patternSet.setIncludes(['**/Ha*TestSuiteIT.class']) + thirdPartyAudit { ignoreMissingClasses() ignoreViolations ( diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index b41174096e493..8a2edeb78c507 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -1,7 +1,6 @@ import org.elasticsearch.gradle.BuildPlugin import org.elasticsearch.gradle.MavenFilteringHack import org.elasticsearch.gradle.test.AntFixture -import org.elasticsearch.gradle.test.ClusterConfiguration import org.elasticsearch.gradle.test.RestIntegTestTask import com.carrotsearch.gradle.junit4.RandomizedTestingTask @@ -71,7 +70,7 @@ task testRepositoryCreds(type: RandomizedTestingTask) { include '**/S3BlobStoreRepositoryTests.class' systemProperty 'es.allow_insecure_settings', 'true' } -project.check.dependsOn(testRepositoryCreds) +check.dependsOn(testRepositoryCreds) unitTest { // these are tested explicitly in separate test tasks @@ -136,78 +135,61 @@ if (!s3EC2Bucket && !s3EC2BasePath && !s3ECSBucket && !s3ECSBasePath) { throw new IllegalArgumentException("not all options specified to run EC2/ECS tests are present") } -buildscript { - repositories { - maven { - url 'https://plugins.gradle.org/m2/' - } - } - dependencies { - classpath 'de.undercouch:gradle-download-task:3.4.3' - } -} - if (useFixture) { - apply plugin: 'elasticsearch.test.fixtures' - - RestIntegTestTask integTestMinio = project.tasks.create('integTestMinio', RestIntegTestTask.class) { - description = "Runs REST tests using the Minio repository." 
- } - - Task writeDockerFile = project.tasks.create('writeDockerFile') { + task writeDockerFile { File minioDockerfile = new File("${project.buildDir}/minio-docker/Dockerfile") outputs.file(minioDockerfile) doLast { minioDockerfile.parentFile.mkdirs() minioDockerfile.text = "FROM minio/minio:RELEASE.2019-01-23T23-18-58Z\n" + - "RUN mkdir -p /minio/data/${s3PermanentBucket}\n" + - "ENV MINIO_ACCESS_KEY ${s3PermanentAccessKey}\n" + - "ENV MINIO_SECRET_KEY ${s3PermanentSecretKey}" + "RUN mkdir -p /minio/data/${s3PermanentBucket}\n" + + "ENV MINIO_ACCESS_KEY ${s3PermanentAccessKey}\n" + + "ENV MINIO_SECRET_KEY ${s3PermanentSecretKey}" } } + preProcessFixture { + dependsOn(writeDockerFile) + } - preProcessFixture.dependsOn(writeDockerFile) - // The following closure must execute before the afterEvaluate block in the constructor of the following integrationTest tasks: - project.afterEvaluate { - // Only configure the Minio tests if postProcessFixture is configured to skip them if Docker is not available - // or fixtures have been disabled - if (postProcessFixture.enabled) { - ClusterConfiguration cluster = project.extensions.getByName('integTestMinioCluster') as ClusterConfiguration - cluster.dependsOn(project.bundlePlugin) - cluster.dependsOn(postProcessFixture) - cluster.keystoreSetting 's3.client.integration_test_permanent.access_key', s3PermanentAccessKey - cluster.keystoreSetting 's3.client.integration_test_permanent.secret_key', s3PermanentSecretKey - - Closure minioAddressAndPort = { - int minioPort = postProcessFixture.ext."test.fixtures.minio-fixture.tcp.9000" - assert minioPort > 0 - return 'http://127.0.0.1:' + minioPort - } - cluster.setting 's3.client.integration_test_permanent.endpoint', "${-> minioAddressAndPort.call()}" - - Task restIntegTestTask = project.tasks.getByName('integTestMinio') - restIntegTestTask.clusterConfig.plugin(project.path) - - // Default jvm arguments for all test clusters - String jvmArgs = "-Xms" + System.getProperty('tests.heap.size', '512m') + - " " + "-Xmx" + System.getProperty('tests.heap.size', '512m') + - " " + System.getProperty('tests.jvm.argline', '') - - restIntegTestTask.clusterConfig.jvmArgs = jvmArgs - project.check.dependsOn(integTestMinio) + task integTestMinio(type: RestIntegTestTask) { + description = "Runs REST tests using the Minio repository." 
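// The Minio endpoint itself is wired up lazily below via testClusters.integTestMinio, which reads
// the mapped container port from postProcessFixture.ext."test.fixtures.minio-fixture.tcp.9000".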
+ dependsOn tasks.bundlePlugin, tasks.postProcessFixture + runner { + // Minio only supports a single access key, see https://github.com/minio/minio/pull/5968 + systemProperty 'tests.rest.blacklist', [ + 'repository_s3/30_repository_temporary_credentials/*', + 'repository_s3/40_repository_ec2_credentials/*', + 'repository_s3/50_repository_ecs_credentials/*' + ].join(",") } } + check.dependsOn(integTestMinio) + BuildPlugin.requireDocker(tasks.integTestMinio) + + testClusters.integTestMinio { + keystore 's3.client.integration_test_permanent.access_key', s3PermanentAccessKey + keystore 's3.client.integration_test_permanent.secret_key', s3PermanentSecretKey + setting 's3.client.integration_test_permanent.endpoint', { + int minioPort = postProcessFixture.ext."test.fixtures.minio-fixture.tcp.9000" + assert minioPort > 0 + return 'http://127.0.0.1:' + minioPort + } + plugin file(tasks.bundlePlugin.archiveFile) + } - integTestMinioRunner.dependsOn(postProcessFixture) - // Minio only supports a single access key, see https://github.com/minio/minio/pull/5968 - integTestMinioRunner.systemProperty 'tests.rest.blacklist', [ - 'repository_s3/30_repository_temporary_credentials/*', - 'repository_s3/40_repository_ec2_credentials/*', - 'repository_s3/50_repository_ecs_credentials/*' - ].join(",") - - BuildPlugin.requireDocker(integTestMinio) + integTest.runner { + systemProperty 'tests.rest.blacklist', 'repository_s3/50_repository_ecs_credentials/*' + } +} else { + integTest.runner { + systemProperty 'tests.rest.blacklist', + [ + 'repository_s3/30_repository_temporary_credentials/*', + 'repository_s3/40_repository_ec2_credentials/*', + 'repository_s3/50_repository_ecs_credentials/*' + ].join(",") + } } File parentFixtures = new File(project.buildDir, "fixtures") @@ -242,82 +224,65 @@ task s3Fixture(type: AntFixture) { args 'org.elasticsearch.repositories.s3.AmazonS3Fixture', baseDir, s3FixtureFile.getAbsolutePath() } -Map expansions = [ - 'permanent_bucket': s3PermanentBucket, - 'permanent_base_path': s3PermanentBasePath, - 'temporary_bucket': s3TemporaryBucket, - 'temporary_base_path': s3TemporaryBasePath, - 'ec2_bucket': s3EC2Bucket, - 'ec2_base_path': s3EC2BasePath, - 'ecs_bucket': s3ECSBucket, - 'ecs_base_path': s3ECSBasePath -] - processTestResources { + Map expansions = [ + 'permanent_bucket': s3PermanentBucket, + 'permanent_base_path': s3PermanentBasePath, + 'temporary_bucket': s3TemporaryBucket, + 'temporary_base_path': s3TemporaryBasePath, + 'ec2_bucket': s3EC2Bucket, + 'ec2_base_path': s3EC2BasePath, + 'ecs_bucket': s3ECSBucket, + 'ecs_base_path': s3ECSBasePath + ] inputs.properties(expansions) MavenFilteringHack.filter(it, expansions) } -project.afterEvaluate { - if (useFixture == false) { - // temporary_credentials, ec2_credentials and ecs_credentials are not ready for third-party-tests yet - integTestRunner.systemProperty 'tests.rest.blacklist', - [ - 'repository_s3/30_repository_temporary_credentials/*', - 'repository_s3/40_repository_ec2_credentials/*', - 'repository_s3/50_repository_ecs_credentials/*' - ].join(",") - } +integTest { + dependsOn s3Fixture } -integTestCluster { - keystoreSetting 's3.client.integration_test_permanent.access_key', s3PermanentAccessKey - keystoreSetting 's3.client.integration_test_permanent.secret_key', s3PermanentSecretKey +testClusters.integTest { + keystore 's3.client.integration_test_permanent.access_key', s3PermanentAccessKey + keystore 's3.client.integration_test_permanent.secret_key', s3PermanentSecretKey - keystoreSetting 
's3.client.integration_test_temporary.access_key', s3TemporaryAccessKey - keystoreSetting 's3.client.integration_test_temporary.secret_key', s3TemporarySecretKey - keystoreSetting 's3.client.integration_test_temporary.session_token', s3TemporarySessionToken + keystore 's3.client.integration_test_temporary.access_key', s3TemporaryAccessKey + keystore 's3.client.integration_test_temporary.secret_key', s3TemporarySecretKey + keystore 's3.client.integration_test_temporary.session_token', s3TemporarySessionToken if (useFixture) { - dependsOn s3Fixture - /* Use a closure on the string to delay evaluation until tests are executed */ - setting 's3.client.integration_test_permanent.endpoint', "http://${-> s3Fixture.addressAndPort}" - setting 's3.client.integration_test_temporary.endpoint', "http://${-> s3Fixture.addressAndPort}" - setting 's3.client.integration_test_ec2.endpoint', "http://${-> s3Fixture.addressAndPort}" + setting 's3.client.integration_test_permanent.endpoint', { "http://${s3Fixture.addressAndPort}" } + setting 's3.client.integration_test_temporary.endpoint', { "http://${s3Fixture.addressAndPort}" } + setting 's3.client.integration_test_ec2.endpoint', { "http://${s3Fixture.addressAndPort}" } // to redirect InstanceProfileCredentialsProvider to custom auth point - systemProperty "com.amazonaws.sdk.ec2MetadataServiceEndpointOverride", "http://${-> s3Fixture.addressAndPort}" + systemProperty "com.amazonaws.sdk.ec2MetadataServiceEndpointOverride", { "http://${s3Fixture.addressAndPort}" } } else { println "Using an external service to test the repository-s3 plugin" } } -integTestRunner.systemProperty 'tests.rest.blacklist', 'repository_s3/50_repository_ecs_credentials/*' - if (useFixture) { - RestIntegTestTask integTestECS = project.tasks.create('integTestECS', RestIntegTestTask.class) { + task integTestECS(type: RestIntegTestTask.class) { description = "Runs tests using the ECS repository." 
+ dependsOn(project.s3Fixture) + runner { + systemProperty 'tests.rest.blacklist', [ + 'repository_s3/10_basic/*', + 'repository_s3/20_repository_permanent_credentials/*', + 'repository_s3/30_repository_temporary_credentials/*', + 'repository_s3/40_repository_ec2_credentials/*' + ].join(",") + } } + check.dependsOn(integTestECS) -// The following closure must execute before the afterEvaluate block in the constructor of the following integrationTest tasks: - project.afterEvaluate { - ClusterConfiguration cluster = project.extensions.getByName('integTestECSCluster') as ClusterConfiguration - cluster.dependsOn(project.s3Fixture) - - cluster.setting 's3.client.integration_test_ecs.endpoint', "http://${-> s3Fixture.addressAndPort}" - - Task integTestECSTask = project.tasks.getByName('integTestECS') - integTestECSTask.clusterConfig.plugin(project.path) - integTestECSTask.clusterConfig.environment 'AWS_CONTAINER_CREDENTIALS_FULL_URI', - "http://${-> s3Fixture.addressAndPort}/ecs_credentials_endpoint" - integTestECSRunner.systemProperty 'tests.rest.blacklist', [ - 'repository_s3/10_basic/*', - 'repository_s3/20_repository_permanent_credentials/*', - 'repository_s3/30_repository_temporary_credentials/*', - 'repository_s3/40_repository_ec2_credentials/*' - ].join(",") + testClusters.integTestECS { + setting 's3.client.integration_test_ecs.endpoint', { "http://${s3Fixture.addressAndPort}" } + plugin file(tasks.bundlePlugin.archiveFile) + environment 'AWS_CONTAINER_CREDENTIALS_FULL_URI', { "http://${s3Fixture.addressAndPort}/ecs_credentials_endpoint" } } - project.check.dependsOn(integTestECS) } thirdPartyAudit.ignoreMissingClasses ( @@ -446,8 +411,3 @@ if (project.runtimeJavaVersion <= JavaVersion.VERSION_1_8) { } else { thirdPartyAudit.ignoreMissingClasses 'javax.activation.DataHandler' } - -// AWS SDK is exposing some deprecated methods which we call using a delegate: -// * setObjectRedirectLocation(String bucketName, String key, String newRedirectLocation) -// * changeObjectStorageClass(String bucketName, String key, StorageClass newStorageClass) -compileTestJava.options.compilerArgs << "-Xlint:-deprecation" diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackageTestCase.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackageTestCase.java index e306e7c63ce5a..458359b299e75 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackageTestCase.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/test/PackageTestCase.java @@ -20,31 +20,45 @@ package org.elasticsearch.packaging.test; import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering; +import org.apache.http.client.fluent.Request; +import org.elasticsearch.packaging.util.FileUtils; import org.elasticsearch.packaging.util.Shell; import org.elasticsearch.packaging.util.Shell.Result; +import org.hamcrest.CoreMatchers; import org.junit.Before; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.Paths; import java.nio.file.StandardOpenOption; import java.util.regex.Matcher; import java.util.regex.Pattern; +import static org.elasticsearch.packaging.util.FileUtils.append; import static org.elasticsearch.packaging.util.FileUtils.assertPathsDontExist; +import static org.elasticsearch.packaging.util.FileUtils.assertPathsExist; +import static org.elasticsearch.packaging.util.FileUtils.cp; +import static org.elasticsearch.packaging.util.FileUtils.fileWithGlobExist; +import static 
org.elasticsearch.packaging.util.FileUtils.mkdir; import static org.elasticsearch.packaging.util.FileUtils.mv; +import static org.elasticsearch.packaging.util.FileUtils.rm; +import static org.elasticsearch.packaging.util.FileUtils.slurp; import static org.elasticsearch.packaging.util.Packages.SYSTEMD_SERVICE; import static org.elasticsearch.packaging.util.Packages.assertInstalled; import static org.elasticsearch.packaging.util.Packages.assertRemoved; import static org.elasticsearch.packaging.util.Packages.install; import static org.elasticsearch.packaging.util.Packages.remove; +import static org.elasticsearch.packaging.util.Packages.restartElasticsearch; import static org.elasticsearch.packaging.util.Packages.startElasticsearch; import static org.elasticsearch.packaging.util.Packages.stopElasticsearch; import static org.elasticsearch.packaging.util.Packages.verifyPackageInstallation; import static org.elasticsearch.packaging.util.Platforms.getOsRelease; import static org.elasticsearch.packaging.util.Platforms.isSystemd; +import static org.elasticsearch.packaging.util.ServerUtils.makeRequest; import static org.elasticsearch.packaging.util.ServerUtils.runElasticsearchTests; +import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.Matchers.containsString; @@ -55,42 +69,50 @@ @TestCaseOrdering(TestCaseOrdering.AlphabeticOrder.class) public abstract class PackageTestCase extends PackagingTestCase { + private Shell sh; @Before public void onlyCompatibleDistributions() { assumeTrue("only compatible distributions", distribution().packaging.compatible); + sh = newShell(); } public void test10InstallPackage() throws IOException { assertRemoved(distribution()); installation = install(distribution()); assertInstalled(distribution()); - verifyPackageInstallation(installation, distribution(), newShell()); + verifyPackageInstallation(installation, distribution(), sh); } public void test20PluginsCommandWhenNoPlugins() { assumeThat(installation, is(notNullValue())); - assertThat(newShell().run(installation.bin("elasticsearch-plugin") + " list").stdout, isEmptyString()); + assertThat(sh.run(installation.bin("elasticsearch-plugin") + " list").stdout, isEmptyString()); } - public void test30InstallDoesNotStartServer() { + public void test30DaemonIsNotEnabledOnRestart() { + if (isSystemd()) { + sh.run("systemctl daemon-reload"); + String isEnabledOutput = sh.runIgnoreExitCode("systemctl is-enabled elasticsearch.service").stdout.trim(); + assertThat(isEnabledOutput, equalTo("disabled")); + } + } + + public void test31InstallDoesNotStartServer() { assumeThat(installation, is(notNullValue())); - assertThat(newShell().run("ps aux").stdout, not(containsString("org.elasticsearch.bootstrap.Elasticsearch"))); + assertThat(sh.run("ps aux").stdout, not(containsString("org.elasticsearch.bootstrap.Elasticsearch"))); } public void assertRunsWithJavaHome() throws IOException { - Shell sh = newShell(); - String systemJavaHome = sh.run("echo $SYSTEM_JAVA_HOME").stdout.trim(); byte[] originalEnvFile = Files.readAllBytes(installation.envFile); try { Files.write(installation.envFile, ("JAVA_HOME=" + systemJavaHome + "\n").getBytes(StandardCharsets.UTF_8), StandardOpenOption.APPEND); - startElasticsearch(); + startElasticsearch(sh); runElasticsearchTests(); - stopElasticsearch(); + stopElasticsearch(sh); } finally { Files.write(installation.envFile, originalEnvFile); } @@ -99,7 +121,7 @@ public void 
assertRunsWithJavaHome() throws IOException { assertThat(new String(Files.readAllBytes(log), StandardCharsets.UTF_8), containsString(systemJavaHome)); } - public void test31JavaHomeOverride() throws IOException { + public void test32JavaHomeOverride() throws IOException { assumeThat(installation, is(notNullValue())); // we always run with java home when no bundled jdk is included, so this test would be repetitive assumeThat(distribution().hasJdk, is(true)); @@ -121,11 +143,20 @@ public void test42BundledJdkRemoved() throws IOException { } public void test40StartServer() throws IOException { + String start = sh.runIgnoreExitCode("date ").stdout.trim(); assumeThat(installation, is(notNullValue())); - startElasticsearch(); + startElasticsearch(sh); + + String journalEntries = sh.runIgnoreExitCode("journalctl _SYSTEMD_UNIT=elasticsearch.service " + + "--since \"" + start + "\" --output cat | wc -l").stdout.trim(); + assertThat(journalEntries, equalTo("0")); + + assertPathsExist(installation.pidDir.resolve("elasticsearch.pid")); + assertPathsExist(installation.logs.resolve("elasticsearch_server.json")); + runElasticsearchTests(); - verifyPackageInstallation(installation, distribution(), newShell()); // check startup script didn't change permissions + verifyPackageInstallation(installation, distribution(), sh); // check startup script didn't change permissions } public void test50Remove() { @@ -134,7 +165,6 @@ public void test50Remove() { remove(distribution()); // removing must stop the service - final Shell sh = newShell(); assertThat(sh.run("ps aux").stdout, not(containsString("org.elasticsearch.bootstrap.Elasticsearch"))); if (isSystemd()) { @@ -184,9 +214,160 @@ public void test60Reinstall() throws IOException { installation = install(distribution()); assertInstalled(distribution()); - verifyPackageInstallation(installation, distribution(), newShell()); + verifyPackageInstallation(installation, distribution(), sh); remove(distribution()); assertRemoved(distribution()); } + + public void test70RestartServer() throws IOException { + try { + installation = install(distribution()); + assertInstalled(distribution()); + + startElasticsearch(sh); + restartElasticsearch(sh); + runElasticsearchTests(); + stopElasticsearch(sh); + } finally { + cleanup(); + } + } + + + public void test72TestRuntimeDirectory() throws IOException { + try { + installation = install(distribution()); + FileUtils.rm(installation.pidDir); + startElasticsearch(sh); + assertPathsExist(installation.pidDir); + stopElasticsearch(sh); + } finally { + cleanup(); + } + } + + public void test73gcLogsExist() throws IOException { + installation = install(distribution()); + startElasticsearch(sh); + // it can be gc.log or gc.log.0.current + assertThat(installation.logs, fileWithGlobExist("gc.log*")); + stopElasticsearch(sh); + } + + // TEST CASES FOR SYSTEMD ONLY + + + /** + * # Simulates the behavior of a system restart: + * # the PID directory is deleted by the operating system + * # but it should not block ES from starting + * # see https://github.com/elastic/elasticsearch/issues/11594 + */ + public void test80DeletePID_DIRandRestart() throws IOException { + assumeTrue(isSystemd()); + + rm(installation.pidDir); + + sh.run("systemd-tmpfiles --create"); + + startElasticsearch(sh); + + final Path pidFile = installation.pidDir.resolve("elasticsearch.pid"); + + assertTrue(Files.exists(pidFile)); + + stopElasticsearch(sh); + } + + public void test81CustomPathConfAndJvmOptions() throws IOException { + assumeTrue(isSystemd()); + + 
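// Note: these package tests now share the single Shell created in the @Before method rather than
// calling newShell() in each test, and Shell gained a String.format-style run(String, Object...)
// overload (see the Shell.java hunk further down), so commands can interpolate arguments, e.g.:
//   String pid = slurp(installation.pidDir.resolve("elasticsearch.pid")).trim();
//   sh.run("cat /proc/%s/limits | grep \"Max file size\" | awk '{ print $4 }'", pid);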
assumeThat(installation, is(notNullValue())); + assertPathsExist(installation.envFile); + + stopElasticsearch(sh); + + // The custom config directory is not under /tmp or /var/tmp because + // systemd's private temp directory functionally means different + // processes can have different views of what's in these directories + String temp = sh.runIgnoreExitCode("mktemp -p /etc -d").stdout.trim(); + final Path tempConf = Paths.get(temp); + + try { + mkdir(tempConf); + cp(installation.config("elasticsearch.yml"), tempConf.resolve("elasticsearch.yml")); + cp(installation.config("log4j2.properties"), tempConf.resolve("log4j2.properties")); + + // we have to disable Log4j from using JMX lest it will hit a security + // manager exception before we have configured logging; this will fail + // startup since we detect usages of logging before it is configured + final String jvmOptions = + "-Xms512m\n" + + "-Xmx512m\n" + + "-Dlog4j2.disable.jmx=true\n"; + append(tempConf.resolve("jvm.options"), jvmOptions); + + sh.runIgnoreExitCode("chown -R elasticsearch:elasticsearch " + tempConf); + + final Shell serverShell = newShell(); + cp(installation.envFile, tempConf.resolve("elasticsearch.bk"));//backup + append(installation.envFile, "ES_PATH_CONF=" + tempConf + "\n"); + append(installation.envFile, "ES_JAVA_OPTS=-XX:-UseCompressedOops"); + + startElasticsearch(serverShell); + + final String nodesResponse = makeRequest(Request.Get("http://localhost:9200/_nodes")); + assertThat(nodesResponse, CoreMatchers.containsString("\"heap_init_in_bytes\":536870912")); + assertThat(nodesResponse, CoreMatchers.containsString("\"using_compressed_ordinary_object_pointers\":\"false\"")); + + stopElasticsearch(serverShell); + + } finally { + rm(installation.envFile); + cp(tempConf.resolve("elasticsearch.bk"), installation.envFile); + rm(tempConf); + cleanup(); + } + } + + public void test82SystemdMask() throws IOException { + try { + assumeTrue(isSystemd()); + + sh.run("systemctl mask systemd-sysctl.service"); + + installation = install(distribution()); + + sh.run("systemctl unmask systemd-sysctl.service"); + } finally { + cleanup(); + } + } + + public void test83serviceFileSetsLimits() throws IOException { + // Limits are changed on systemd platforms only + assumeTrue(isSystemd()); + + installation = install(distribution()); + + startElasticsearch(sh); + + final Path pidFile = installation.pidDir.resolve("elasticsearch.pid"); + assertTrue(Files.exists(pidFile)); + String pid = slurp(pidFile).trim(); + String maxFileSize = sh.run("cat /proc/%s/limits | grep \"Max file size\" | awk '{ print $4 }'", pid).stdout.trim(); + assertThat(maxFileSize, equalTo("unlimited")); + + String maxProcesses = sh.run("cat /proc/%s/limits | grep \"Max processes\" | awk '{ print $3 }'", pid).stdout.trim(); + assertThat(maxProcesses, equalTo("4096")); + + String maxOpenFiles = sh.run("cat /proc/%s/limits | grep \"Max open files\" | awk '{ print $4 }'", pid).stdout.trim(); + assertThat(maxOpenFiles, equalTo("65535")); + + String maxAddressSpace = sh.run("cat /proc/%s/limits | grep \"Max address space\" | awk '{ print $4 }'", pid).stdout.trim(); + assertThat(maxAddressSpace, equalTo("unlimited")); + + stopElasticsearch(sh); + } } diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/FileUtils.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/FileUtils.java index 10d1b3ee6b6de..efbf0bd74a354 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/FileUtils.java +++ 
b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/FileUtils.java @@ -20,6 +20,8 @@ package org.elasticsearch.packaging.util; import org.elasticsearch.core.internal.io.IOUtils; +import org.hamcrest.FeatureMatcher; +import org.hamcrest.Matcher; import java.io.BufferedWriter; import java.io.IOException; @@ -34,9 +36,11 @@ import java.nio.file.attribute.PosixFileAttributes; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.emptyIterable; import static org.hamcrest.core.IsNot.not; import static org.hamcrest.text.IsEmptyString.isEmptyOrNullString; import static org.junit.Assert.assertFalse; @@ -69,6 +73,15 @@ public static void rm(Path... paths) { } } + public static Path mktempDir(Path path) { + try { + return Files.createTempDirectory(path,"tmp"); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + public static Path mkdir(Path path) { try { return Files.createDirectories(path); @@ -176,6 +189,20 @@ public static void assertPathsExist(Path... paths) { Arrays.stream(paths).forEach(path -> assertTrue(path + " should exist", Files.exists(path))); } + public static Matcher<Path> fileWithGlobExist(String glob) throws IOException { + return new FeatureMatcher<Path, Iterable<Path>>(not(emptyIterable()),"File with pattern exist", "file with pattern"){ + + @Override + protected Iterable<Path> featureValueOf(Path actual) { + try { + return Files.newDirectoryStream(actual,glob); + } catch (IOException e) { + return Collections.emptyList(); + } + } + }; + } + public static void assertPathsDontExist(Path... paths) { Arrays.stream(paths).forEach(path -> assertFalse(path + " should not exist", Files.exists(path))); } diff --git a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Packages.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Packages.java index 7014a627a7aee..afa7e371c2c55 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Packages.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Packages.java @@ -270,8 +270,7 @@ private static void verifyDefaultInstallation(Installation es) { ).forEach(configFile -> assertThat(es.config(configFile), file(File, "root", "elasticsearch", p660))); } - public static void startElasticsearch() throws IOException { - final Shell sh = new Shell(); + public static void startElasticsearch(Shell sh) throws IOException { if (isSystemd()) { sh.run("systemctl daemon-reload"); sh.run("systemctl enable elasticsearch.service"); @@ -281,6 +280,10 @@ public static void startElasticsearch() throws IOException { sh.run("service elasticsearch start"); } + assertElasticsearchStarted(sh); + } + + public static void assertElasticsearchStarted(Shell sh) throws IOException { waitForElasticsearch(); if (isSystemd()) { @@ -291,12 +294,21 @@ public static void startElasticsearch() throws IOException { } } - public static void stopElasticsearch() throws IOException { - final Shell sh = new Shell(); + public static void stopElasticsearch(Shell sh) throws IOException { if (isSystemd()) { sh.run("systemctl stop elasticsearch.service"); } else { sh.run("service elasticsearch stop"); } } + + public static void restartElasticsearch(Shell sh) throws IOException { + if (isSystemd()) { + sh.run("systemctl restart elasticsearch.service"); + } else { + sh.run("service elasticsearch restart"); + } + + waitForElasticsearch(); + } } diff --git
a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Shell.java b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Shell.java index b437438130552..dc490de05b9c8 100644 --- a/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Shell.java +++ b/qa/vagrant/src/main/java/org/elasticsearch/packaging/util/Shell.java @@ -27,6 +27,7 @@ import java.io.InputStreamReader; import java.nio.file.Path; import java.util.HashMap; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.stream.Stream; @@ -67,6 +68,10 @@ public Result runIgnoreExitCode(String script) { return runScriptIgnoreExitCode(getScriptCommand(script)); } + public Result run( String command, Object... args) { + String formattedCommand = String.format(Locale.ROOT, command, args); + return run(formattedCommand); + } private String[] getScriptCommand(String script) { if (Platforms.WINDOWS) { return powershellCommand(script); diff --git a/qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats b/qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats deleted file mode 100644 index 8baa75f38f5bc..0000000000000 --- a/qa/vagrant/src/test/resources/packaging/tests/60_systemd.bats +++ /dev/null @@ -1,257 +0,0 @@ -#!/usr/bin/env bats - -# This file is used to test the elasticsearch Systemd setup. - -# WARNING: This testing file must be executed as root and can -# dramatically change your system. It should only be executed -# in a throw-away VM like those made by the Vagrantfile at -# the root of the Elasticsearch source code. This should -# cause the script to fail if it is executed any other way: -[ -f /etc/is_vagrant_vm ] || { - >&2 echo "must be run on a vagrant VM" - exit 1 -} - -# The test case can be executed with the Bash Automated -# Testing System tool available at https://github.com/sstephenson/bats -# Thanks to Sam Stephenson! - -# Licensed to Elasticsearch under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# Load test utilities -load $BATS_UTILS/utils.bash -load $BATS_UTILS/packages.bash -load $BATS_UTILS/plugins.bash - -# Cleans everything for the 1st execution -setup() { - skip_not_systemd - skip_not_dpkg_or_rpm - export_elasticsearch_paths -} - -@test "[SYSTEMD] install elasticsearch" { - clean_before_test - install_package -} - -@test "[SYSTEMD] daemon reload after install" { - systemctl daemon-reload -} - -@test "[SYSTEMD] daemon isn't enabled on restart" { - # Rather than restart the VM we just ask systemd if it plans on starting - # elasticsearch on restart. Not as strong as a restart but much much - # faster. 
- run systemctl is-enabled elasticsearch.service - [ "$output" = "disabled" ] -} - -@test "[SYSTEMD] enable" { - systemctl enable elasticsearch.service - - systemctl is-enabled elasticsearch.service -} - -@test "[SYSTEMD] start" { - # Capture the current epoch in millis - run date +%s - epoch="$output" - - # The OpenJDK packaged for CentOS and OEL both override the default value (false) for the JVM option "AssumeMP". - # - # Because it is forced to "true" by default for these packages, the following warning message is printed to the - # standard output when the Vagrant box has only 1 CPU: - # OpenJDK 64-Bit Server VM warning: If the number of processors is expected to increase from one, then you should configure - # the number of parallel GC threads appropriately using -XX:ParallelGCThreads=N - # - # This message will then fail the next test where we check if no entries have been added to the journal. - # - # This message appears since with java-1.8.0-openjdk-1.8.0.111-1.b15.el7_2.x86_64 because of the commit: - # 2016-10-10 - Andrew Hughes - 1:1.8.0.111-1.b15 - Turn debug builds on for all JIT architectures. - # Always AssumeMP on RHEL. - # - Resolves: rhbz#1381990 - # - if [ -x "$(command -v lsb_release)" ]; then - # Here we set the "-XX:-AssumeMP" option to false again: - lsb_release=$(lsb_release -i) - if [[ "$lsb_release" =~ "CentOS" ]] || [[ "$lsb_release" =~ "OracleServer" ]]; then - echo "-XX:-AssumeMP" >> $ESCONFIG/jvm.options - fi - fi - - systemctl start elasticsearch.service - wait_for_elasticsearch_status - assert_file_exist "/var/run/elasticsearch/elasticsearch.pid" - assert_file_exist "/var/log/elasticsearch/elasticsearch_server.json" - - # Converts the epoch back in a human readable format - run date --date=@$epoch "+%Y-%m-%d %H:%M:%S" - since="$output" - - # Verifies that no new entries in journald have been added - # since the last start - result="$(journalctl _SYSTEMD_UNIT=elasticsearch.service --since "$since" --output cat | wc -l)" - [ "$result" -eq "0" ] || { - echo "Expected no entries in journalctl for the Elasticsearch service but found:" - journalctl _SYSTEMD_UNIT=elasticsearch.service --since "$since" - false - } -} - -@test "[SYSTEMD] start (running)" { - systemctl start elasticsearch.service -} - -@test "[SYSTEMD] is active (running)" { - run systemctl is-active elasticsearch.service - [ "$status" -eq 0 ] - [ "$output" = "active" ] -} - -@test "[SYSTEMD] status (running)" { - systemctl status elasticsearch.service -} - -################################## -# Check that Elasticsearch is working -################################## -@test "[SYSTEMD] test elasticsearch" { - run_elasticsearch_tests -} - -@test "[SYSTEMD] restart" { - systemctl restart elasticsearch.service - - wait_for_elasticsearch_status - - service elasticsearch status -} - -@test "[SYSTEMD] stop (running)" { - systemctl stop elasticsearch.service -} - -@test "[SYSTEMD] status (stopping)" { - run systemctl status elasticsearch.service - # I'm not sure why suse exits 0 here, but it does - if [ ! -e /etc/SuSE-release ]; then - [ "$status" -eq 3 ] || "Expected exit code 3 meaning stopped but got $status" - fi - echo "$output" | grep "Active:" | grep "inactive" -} - -@test "[SYSTEMD] stop (stopped)" { - systemctl stop elasticsearch.service -} - -@test "[SYSTEMD] status (stopped)" { - run systemctl status elasticsearch.service - # I'm not sure why suse exits 0 here, but it does - if [ ! 
-e /etc/SuSE-release ]; then - [ "$status" -eq 3 ] || "Expected exit code 3 meaning stopped but got $status" - fi - echo "$output" | grep "Active:" | grep "inactive" -} - -# Simulates the behavior of a system restart: -# the PID directory is deleted by the operating system -# but it should not block ES from starting -# see https://github.com/elastic/elasticsearch/issues/11594 -@test "[SYSTEMD] delete PID_DIR and restart" { - rm -rf /var/run/elasticsearch - - systemd-tmpfiles --create - - systemctl start elasticsearch.service - - wait_for_elasticsearch_status - - assert_file_exist "/var/run/elasticsearch/elasticsearch.pid" - - systemctl stop elasticsearch.service -} - -@test "[SYSTEMD] start Elasticsearch with custom JVM options" { - assert_file_exist $ESENVFILE - # The custom config directory is not under /tmp or /var/tmp because - # systemd's private temp directory functionally means different - # processes can have different views of what's in these directories - local temp=`mktemp -p /etc -d` - cp "$ESCONFIG"/elasticsearch.yml "$temp" - cp "$ESCONFIG"/log4j2.properties "$temp" - touch "$temp/jvm.options" - chown -R elasticsearch:elasticsearch "$temp" - echo "-Xms512m" >> "$temp/jvm.options" - echo "-Xmx512m" >> "$temp/jvm.options" - # we have to disable Log4j from using JMX lest it will hit a security - # manager exception before we have configured logging; this will fail - # startup since we detect usages of logging before it is configured - echo "-Dlog4j2.disable.jmx=true" >> "$temp/jvm.options" - cp $ESENVFILE "$temp/elasticsearch" - echo "ES_PATH_CONF=\"$temp\"" >> $ESENVFILE - echo "ES_JAVA_OPTS=\"-XX:-UseCompressedOops\"" >> $ESENVFILE - service elasticsearch start - wait_for_elasticsearch_status - curl -s -XGET localhost:9200/_nodes | fgrep '"heap_init_in_bytes":536870912' - curl -s -XGET localhost:9200/_nodes | fgrep '"using_compressed_ordinary_object_pointers":"false"' - service elasticsearch stop - cp "$temp/elasticsearch" $ESENVFILE -} - -@test "[SYSTEMD] masking systemd-sysctl" { - clean_before_test - - systemctl mask systemd-sysctl.service - install_package - - systemctl unmask systemd-sysctl.service -} - -@test "[SYSTEMD] service file sets limits" { - clean_before_test - install_package - systemctl start elasticsearch.service - wait_for_elasticsearch_status - local pid=$(cat /var/run/elasticsearch/elasticsearch.pid) - local max_file_size=$(cat /proc/$pid/limits | grep "Max file size" | awk '{ print $4 }') - [ "$max_file_size" == "unlimited" ] - local max_processes=$(cat /proc/$pid/limits | grep "Max processes" | awk '{ print $3 }') - [ "$max_processes" == "4096" ] - local max_open_files=$(cat /proc/$pid/limits | grep "Max open files" | awk '{ print $4 }') - [ "$max_open_files" == "65535" ] - local max_address_space=$(cat /proc/$pid/limits | grep "Max address space" | awk '{ print $4 }') - [ "$max_address_space" == "unlimited" ] - systemctl stop elasticsearch.service -} - -@test "[SYSTEMD] test runtime directory" { - clean_before_test - install_package - sudo rm -rf /var/run/elasticsearch - systemctl start elasticsearch.service - wait_for_elasticsearch_status - [ -d /var/run/elasticsearch ] - systemctl stop elasticsearch.service -} - -@test "[SYSTEMD] GC logs exist" { - start_elasticsearch_service - assert_file_exist /var/log/elasticsearch/gc.log.0.current - stop_elasticsearch_service -} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yml 
b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yml index b8c922c98c15b..732a53aeea4f8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.allocation_explain/10_basic.yml @@ -12,10 +12,6 @@ - match: { acknowledged: true } - - do: - cluster.state: - metric: [ master_node ] - - do: cluster.allocation_explain: body: { "index": "test", "shard": 0, "primary": true } @@ -37,10 +33,6 @@ index: test body: { "settings": { "index.number_of_shards": 1, "index.number_of_replicas": 9 } } - - do: - cluster.state: - metric: [ master_node ] - - do: cluster.allocation_explain: include_disk_info: true diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.reroute/11_explain.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.reroute/11_explain.yml index 5419acb9321f4..248b47d07a71e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.reroute/11_explain.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/cluster.reroute/11_explain.yml @@ -25,12 +25,14 @@ setup: --- "Explain API for non-existent node & shard": + - skip: + features: [arbitrary_key] - do: - cluster.state: - metric: [ master_node ] - - - set: {master_node: node_id} + nodes.info: + node_id: data:true + - set: + nodes._arbitrary_key_: node_id - do: cluster.reroute: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml index a6d6bb0730548..41c851b71cc6c 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml @@ -3,18 +3,20 @@ - skip: version: " - 6.9.99" reason: expects warnings that pre-7.0.0 will not send - features: "warnings" - # creates an index with one document solely allocated on the master node + features: [warnings, arbitrary_key] + + # creates an index with one document solely allocated on a particular data node # and shrinks it into a new index with a single shard # we don't do the relocation to a single node after the index is created # here since in a mixed version cluster we can't identify # which node is the one with the highest version and that is the only one that can safely # be used to shrink the index. 
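The YAML changes above (and in the hunks that follow) all replace the old "read the master node id from cluster.state" setup with the test runner's arbitrary_key feature: any node id returned by nodes.info, optionally filtered to data nodes, is stashed and reused, for example as the value of index.routing.allocation.include._id. As a rough illustration only, the equivalent lookup in a Java REST test could look like the sketch below; it assumes an ESRestTestCase subclass, and the method name anyDataNodeId is illustrative, not part of this change.

    // Sketch: pick an arbitrary data node id, mirroring the YAML `nodes._arbitrary_key_` stash
    @SuppressWarnings("unchecked")
    private String anyDataNodeId() throws IOException {
        Response rsp = client().performRequest(new Request("GET", "/_nodes/data:true"));
        Map<String, Object> nodes = (Map<String, Object>) entityAsMap(rsp).get("nodes");
        return nodes.keySet().iterator().next(); // any node will do, like $node_id in the YAML tests
    }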
- - do: - cluster.state: {} - # Get master node id - - set: { master_node: master } + - do: + nodes.info: + node_id: data:true + - set: + nodes._arbitrary_key_: node_id - do: indices.create: @@ -22,8 +24,8 @@ wait_for_active_shards: 1 body: settings: - # ensure everything is allocated on a single node - index.routing.allocation.include._id: $master + # ensure everything is allocated on the same data node + index.routing.allocation.include._id: $node_id index.number_of_shards: 2 index.number_of_replicas: 0 - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml index f12864236d7bd..dec0760fc6b19 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml @@ -3,13 +3,13 @@ - skip: version: " - 6.9.99" reason: expects warnings that pre-7.0.0 will not send - features: "warnings" + features: [warnings, arbitrary_key] - do: - cluster.state: {} - # Get master node id - - - set: { master_node: master } + nodes.info: + node_id: data:true + - set: + nodes._arbitrary_key_: node_id # create index - do: @@ -19,7 +19,7 @@ body: settings: # ensure everything is allocated on a single node - index.routing.allocation.include._id: $master + index.routing.allocation.include._id: $node_id index.number_of_shards: 2 index.number_of_replicas: 0 mappings: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml index 3add4b100d812..eda095ff91f98 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml @@ -3,13 +3,13 @@ - skip: version: " - 6.9.99" reason: expects warnings that pre-7.0.0 will not send - features: "warnings" + features: [warnings, arbitrary_key] - do: - cluster.state: {} - - # get master node id - - set: { master_node: master } + nodes.info: + node_id: data:true + - set: + nodes._arbitrary_key_: node_id - do: indices.create: @@ -17,8 +17,8 @@ wait_for_active_shards: 1 body: settings: - # ensure everything is allocated on the master node - index.routing.allocation.include._id: $master + # ensure everything is allocated on the same node + index.routing.allocation.include._id: $node_id index.number_of_shards: 2 index.number_of_replicas: 0 index.merge.scheduler.max_merge_count: 4 @@ -60,4 +60,4 @@ - match: { copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" } - match: { copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" } - match: { copy-settings-target.settings.index.blocks.write: "true" } - - match: { copy-settings-target.settings.index.routing.allocation.include._id: $master } + - match: { copy-settings-target.settings.index.routing.allocation.include._id: $node_id } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml index 8cf932b1c1159..df9eae0adf340 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml @@ -3,13 +3,13 @@ - skip: version: " - 
6.9.99" reason: expects warnings that pre-7.0.0 will not send - features: "warnings" + features: [arbitrary_key, warnings] - do: - cluster.state: {} - - # get master node id - - set: { master_node: master } + nodes.info: + node_id: data:true + - set: + nodes._arbitrary_key_: node_id - do: indices.create: @@ -17,8 +17,8 @@ wait_for_active_shards: 1 body: settings: - # ensure everything is allocated on the master node - index.routing.allocation.include._id: $master + # ensure everything is allocated on the same node + index.routing.allocation.include._id: $node_id index.number_of_replicas: 0 index.number_of_shards: 1 index.number_of_routing_shards: 4 @@ -62,4 +62,4 @@ - match: { copy-settings-target.settings.index.merge.scheduler.max_merge_count: "4" } - match: { copy-settings-target.settings.index.merge.scheduler.max_thread_count: "2" } - match: { copy-settings-target.settings.index.blocks.write: "true" } - - match: { copy-settings-target.settings.index.routing.allocation.include._id: $master } + - match: { copy-settings-target.settings.index.routing.allocation.include._id: $node_id } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/10_basic.yml index 47f6c3e21141a..5821117f4c005 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/10_basic.yml @@ -1,14 +1,13 @@ +setup: + - skip: + features: [arbitrary_key] --- "node_info test": - - do: - cluster.state: {} - - # Get master node id - - set: { master_node: master } - - do: nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - is_true: nodes - is_true: cluster_name - - is_true: nodes.$master.roles + - is_true: nodes.$node_id.roles diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/20_transport.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/20_transport.yml index efd2260356a2d..09102157bcb99 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/20_transport.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/20_transport.yml @@ -2,15 +2,15 @@ "node_info test profile is empty": - skip: - features: stash_in_path + features: [stash_in_path, arbitrary_key] - do: - cluster.state: {} - - - set: {master_node: master} + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.info: metric: [ transport ] - - is_true: nodes.$master.transport.profiles + - is_true: nodes.$node_id.transport.profiles diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/30_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/30_settings.yml index a63c246b6033e..99b8b6f361a47 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/30_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.info/30_settings.yml @@ -1,19 +1,22 @@ --- "node_info test flat_settings": - - do: - cluster.state: {} + - skip: + features: [arbitrary_key] - - set: { master_node: master } + - do: + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.info: metric: [ settings ] - - match : { nodes.$master.settings.client.type: node } + - match : { nodes.$node_id.settings.client.type: node } - do: nodes.info: metric: [ settings ] flat_settings: true - - match : { nodes.$master.settings.client\.type: node } + - match : { nodes.$node_id.settings.client\.type: node } diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/10_basic.yml index 61614e7f8e1b7..099483be9aded 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/10_basic.yml @@ -9,17 +9,20 @@ --- "Nodes stats level": - - do: - cluster.state: {} + - skip: + features: [arbitrary_key] - - set: { master_node: master } + - do: + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.stats: metric: [ indices ] level: "indices" - - is_true: nodes.$master.indices.indices + - is_true: nodes.$node_id.indices.indices --- "Nodes stats unrecognized parameter": diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml index 998909dd9cf1b..a09619b7255c3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/11_indices_metrics.yml @@ -1,211 +1,227 @@ --- "Metric - blank": + - skip: + features: [arbitrary_key] - do: - cluster.state: {} - - - set: { master_node: master } + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.stats: {} - - is_true: nodes.$master.indices.docs - - is_true: nodes.$master.indices.store - - is_true: nodes.$master.indices.indexing - - is_true: nodes.$master.indices.get - - is_true: nodes.$master.indices.search - - is_true: nodes.$master.indices.merges - - is_true: nodes.$master.indices.refresh - - is_true: nodes.$master.indices.flush - - is_true: nodes.$master.indices.warmer - - is_true: nodes.$master.indices.query_cache - - is_true: nodes.$master.indices.fielddata - - is_true: nodes.$master.indices.completion - - is_true: nodes.$master.indices.segments - - is_true: nodes.$master.indices.translog - - is_true: nodes.$master.indices.recovery + - is_true: nodes.$node_id.indices.docs + - is_true: nodes.$node_id.indices.store + - is_true: nodes.$node_id.indices.indexing + - is_true: nodes.$node_id.indices.get + - is_true: nodes.$node_id.indices.search + - is_true: nodes.$node_id.indices.merges + - is_true: nodes.$node_id.indices.refresh + - is_true: nodes.$node_id.indices.flush + - is_true: nodes.$node_id.indices.warmer + - is_true: nodes.$node_id.indices.query_cache + - is_true: nodes.$node_id.indices.fielddata + - is_true: nodes.$node_id.indices.completion + - is_true: nodes.$node_id.indices.segments + - is_true: nodes.$node_id.indices.translog + - is_true: nodes.$node_id.indices.recovery --- "Metric - _all": + - skip: + features: [arbitrary_key] - do: - cluster.state: {} - - - set: { master_node: master } + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.stats: { metric: _all } - - is_true: nodes.$master.indices.docs - - is_true: nodes.$master.indices.store - - is_true: nodes.$master.indices.indexing - - is_true: nodes.$master.indices.get - - is_true: nodes.$master.indices.search - - is_true: nodes.$master.indices.merges - - is_true: nodes.$master.indices.refresh - - is_true: nodes.$master.indices.flush - - is_true: nodes.$master.indices.warmer - - is_true: nodes.$master.indices.query_cache - - is_true: nodes.$master.indices.fielddata - - is_true: nodes.$master.indices.completion - - is_true: nodes.$master.indices.segments - - is_true: nodes.$master.indices.translog - - is_true: 
nodes.$master.indices.recovery + - is_true: nodes.$node_id.indices.docs + - is_true: nodes.$node_id.indices.store + - is_true: nodes.$node_id.indices.indexing + - is_true: nodes.$node_id.indices.get + - is_true: nodes.$node_id.indices.search + - is_true: nodes.$node_id.indices.merges + - is_true: nodes.$node_id.indices.refresh + - is_true: nodes.$node_id.indices.flush + - is_true: nodes.$node_id.indices.warmer + - is_true: nodes.$node_id.indices.query_cache + - is_true: nodes.$node_id.indices.fielddata + - is_true: nodes.$node_id.indices.completion + - is_true: nodes.$node_id.indices.segments + - is_true: nodes.$node_id.indices.translog + - is_true: nodes.$node_id.indices.recovery --- "Metric - indices _all": + - skip: + features: [arbitrary_key] - do: - cluster.state: {} - - - set: { master_node: master } + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.stats: { metric: indices, index_metric: _all } - - is_true: nodes.$master.indices.docs - - is_true: nodes.$master.indices.store - - is_true: nodes.$master.indices.indexing - - is_true: nodes.$master.indices.get - - is_true: nodes.$master.indices.search - - is_true: nodes.$master.indices.merges - - is_true: nodes.$master.indices.refresh - - is_true: nodes.$master.indices.flush - - is_true: nodes.$master.indices.warmer - - is_true: nodes.$master.indices.query_cache - - is_true: nodes.$master.indices.fielddata - - is_true: nodes.$master.indices.completion - - is_true: nodes.$master.indices.segments - - is_true: nodes.$master.indices.translog - - is_true: nodes.$master.indices.recovery + - is_true: nodes.$node_id.indices.docs + - is_true: nodes.$node_id.indices.store + - is_true: nodes.$node_id.indices.indexing + - is_true: nodes.$node_id.indices.get + - is_true: nodes.$node_id.indices.search + - is_true: nodes.$node_id.indices.merges + - is_true: nodes.$node_id.indices.refresh + - is_true: nodes.$node_id.indices.flush + - is_true: nodes.$node_id.indices.warmer + - is_true: nodes.$node_id.indices.query_cache + - is_true: nodes.$node_id.indices.fielddata + - is_true: nodes.$node_id.indices.completion + - is_true: nodes.$node_id.indices.segments + - is_true: nodes.$node_id.indices.translog + - is_true: nodes.$node_id.indices.recovery --- "Metric - one": + - skip: + features: [arbitrary_key] - do: - cluster.state: {} - - - set: { master_node: master } + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.stats: { metric: indices, index_metric: docs } - - is_true: nodes.$master.indices.docs - - is_false: nodes.$master.indices.store - - is_false: nodes.$master.indices.indexing - - is_false: nodes.$master.indices.get - - is_false: nodes.$master.indices.search - - is_false: nodes.$master.indices.merges - - is_false: nodes.$master.indices.refresh - - is_false: nodes.$master.indices.flush - - is_false: nodes.$master.indices.warmer - - is_false: nodes.$master.indices.query_cache - - is_false: nodes.$master.indices.fielddata - - is_false: nodes.$master.indices.completion - - is_false: nodes.$master.indices.segments - - is_false: nodes.$master.indices.translog - - is_false: nodes.$master.indices.recovery + - is_true: nodes.$node_id.indices.docs + - is_false: nodes.$node_id.indices.store + - is_false: nodes.$node_id.indices.indexing + - is_false: nodes.$node_id.indices.get + - is_false: nodes.$node_id.indices.search + - is_false: nodes.$node_id.indices.merges + - is_false: nodes.$node_id.indices.refresh + - is_false: nodes.$node_id.indices.flush + - is_false: nodes.$node_id.indices.warmer + - is_false: 
nodes.$node_id.indices.query_cache + - is_false: nodes.$node_id.indices.fielddata + - is_false: nodes.$node_id.indices.completion + - is_false: nodes.$node_id.indices.segments + - is_false: nodes.$node_id.indices.translog + - is_false: nodes.$node_id.indices.recovery --- "Metric - multi": + - skip: + features: [arbitrary_key] - do: - cluster.state: {} - - - set: { master_node: master } + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.stats: { metric: indices, index_metric: [ store, get, merge ] } - - is_false: nodes.$master.indices.docs - - is_true: nodes.$master.indices.store - - is_false: nodes.$master.indices.indexing - - is_true: nodes.$master.indices.get - - is_false: nodes.$master.indices.search - - is_true: nodes.$master.indices.merges - - is_false: nodes.$master.indices.refresh - - is_false: nodes.$master.indices.flush - - is_false: nodes.$master.indices.warmer - - is_false: nodes.$master.indices.query_cache - - is_false: nodes.$master.indices.fielddata - - is_false: nodes.$master.indices.completion - - is_false: nodes.$master.indices.segments - - is_false: nodes.$master.indices.translog - - is_false: nodes.$master.indices.recovery + - is_false: nodes.$node_id.indices.docs + - is_true: nodes.$node_id.indices.store + - is_false: nodes.$node_id.indices.indexing + - is_true: nodes.$node_id.indices.get + - is_false: nodes.$node_id.indices.search + - is_true: nodes.$node_id.indices.merges + - is_false: nodes.$node_id.indices.refresh + - is_false: nodes.$node_id.indices.flush + - is_false: nodes.$node_id.indices.warmer + - is_false: nodes.$node_id.indices.query_cache + - is_false: nodes.$node_id.indices.fielddata + - is_false: nodes.$node_id.indices.completion + - is_false: nodes.$node_id.indices.segments + - is_false: nodes.$node_id.indices.translog + - is_false: nodes.$node_id.indices.recovery --- "Metric - recovery": + - skip: + features: [arbitrary_key] - do: - cluster.state: {} - - - set: { master_node: master } + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.stats: { metric: indices, index_metric: [ recovery ] } - - is_false: nodes.$master.indices.docs - - is_false: nodes.$master.indices.store - - is_false: nodes.$master.indices.indexing - - is_false: nodes.$master.indices.get - - is_false: nodes.$master.indices.search - - is_false: nodes.$master.indices.merges - - is_false: nodes.$master.indices.refresh - - is_false: nodes.$master.indices.flush - - is_false: nodes.$master.indices.warmer - - is_false: nodes.$master.indices.query_cache - - is_false: nodes.$master.indices.fielddata - - is_false: nodes.$master.indices.completion - - is_false: nodes.$master.indices.segments - - is_false: nodes.$master.indices.translog - - is_true: nodes.$master.indices.recovery + - is_false: nodes.$node_id.indices.docs + - is_false: nodes.$node_id.indices.store + - is_false: nodes.$node_id.indices.indexing + - is_false: nodes.$node_id.indices.get + - is_false: nodes.$node_id.indices.search + - is_false: nodes.$node_id.indices.merges + - is_false: nodes.$node_id.indices.refresh + - is_false: nodes.$node_id.indices.flush + - is_false: nodes.$node_id.indices.warmer + - is_false: nodes.$node_id.indices.query_cache + - is_false: nodes.$node_id.indices.fielddata + - is_false: nodes.$node_id.indices.completion + - is_false: nodes.$node_id.indices.segments + - is_false: nodes.$node_id.indices.translog + - is_true: nodes.$node_id.indices.recovery --- "Metric - _all include_segment_file_sizes": + - skip: + features: [arbitrary_key] - do: - cluster.state: {} - - - set: 
{ master_node: master } + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.stats: { metric: indices, index_metric: _all, include_segment_file_sizes: true } - - is_true: nodes.$master.indices.docs - - is_true: nodes.$master.indices.store - - is_true: nodes.$master.indices.indexing - - is_true: nodes.$master.indices.get - - is_true: nodes.$master.indices.search - - is_true: nodes.$master.indices.merges - - is_true: nodes.$master.indices.refresh - - is_true: nodes.$master.indices.flush - - is_true: nodes.$master.indices.warmer - - is_true: nodes.$master.indices.query_cache - - is_true: nodes.$master.indices.fielddata - - is_true: nodes.$master.indices.completion - - is_true: nodes.$master.indices.segments - - is_true: nodes.$master.indices.translog - - is_true: nodes.$master.indices.recovery - - is_true: nodes.$master.indices.segments.file_sizes + - is_true: nodes.$node_id.indices.docs + - is_true: nodes.$node_id.indices.store + - is_true: nodes.$node_id.indices.indexing + - is_true: nodes.$node_id.indices.get + - is_true: nodes.$node_id.indices.search + - is_true: nodes.$node_id.indices.merges + - is_true: nodes.$node_id.indices.refresh + - is_true: nodes.$node_id.indices.flush + - is_true: nodes.$node_id.indices.warmer + - is_true: nodes.$node_id.indices.query_cache + - is_true: nodes.$node_id.indices.fielddata + - is_true: nodes.$node_id.indices.completion + - is_true: nodes.$node_id.indices.segments + - is_true: nodes.$node_id.indices.translog + - is_true: nodes.$node_id.indices.recovery + - is_true: nodes.$node_id.indices.segments.file_sizes --- "Metric - segments include_segment_file_sizes": + - skip: + features: [arbitrary_key] - do: - cluster.state: {} - - - set: { master_node: master } + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: nodes.stats: { metric: indices, index_metric: segments, include_segment_file_sizes: true } - - is_false: nodes.$master.indices.docs - - is_false: nodes.$master.indices.store - - is_false: nodes.$master.indices.indexing - - is_false: nodes.$master.indices.get - - is_false: nodes.$master.indices.search - - is_false: nodes.$master.indices.merges - - is_false: nodes.$master.indices.refresh - - is_false: nodes.$master.indices.flush - - is_false: nodes.$master.indices.warmer - - is_false: nodes.$master.indices.query_cache - - is_false: nodes.$master.indices.fielddata - - is_false: nodes.$master.indices.completion - - is_true: nodes.$master.indices.segments - - is_false: nodes.$master.indices.translog - - is_false: nodes.$master.indices.recovery - - is_true: nodes.$master.indices.segments.file_sizes + - is_false: nodes.$node_id.indices.docs + - is_false: nodes.$node_id.indices.store + - is_false: nodes.$node_id.indices.indexing + - is_false: nodes.$node_id.indices.get + - is_false: nodes.$node_id.indices.search + - is_false: nodes.$node_id.indices.merges + - is_false: nodes.$node_id.indices.refresh + - is_false: nodes.$node_id.indices.flush + - is_false: nodes.$node_id.indices.warmer + - is_false: nodes.$node_id.indices.query_cache + - is_false: nodes.$node_id.indices.fielddata + - is_false: nodes.$node_id.indices.completion + - is_true: nodes.$node_id.indices.segments + - is_false: nodes.$node_id.indices.translog + - is_false: nodes.$node_id.indices.recovery + - is_true: nodes.$node_id.indices.segments.file_sizes diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/20_response_filtering.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/20_response_filtering.yml index 
432e5d8c207ec..a478fd7d3f235 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/20_response_filtering.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/20_response_filtering.yml @@ -1,10 +1,11 @@ --- "Nodes Stats with response filtering": + - skip: + features: [arbitrary_key] - do: - cluster.state: {} - - # Get master node id - - set: { master_node: master } + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id # Nodes Stats with no filtering - do: @@ -12,18 +13,18 @@ - is_true: cluster_name - is_true: nodes - - is_true: nodes.$master.name - - is_true: nodes.$master.indices - - is_true: nodes.$master.indices.docs - - gte: { nodes.$master.indices.docs.count: 0 } - - is_true: nodes.$master.indices.segments - - gte: { nodes.$master.indices.segments.count: 0 } - - is_true: nodes.$master.jvm - - is_true: nodes.$master.jvm.threads - - gte: { nodes.$master.jvm.threads.count: 0 } - - is_true: nodes.$master.jvm.buffer_pools.direct - - gte: { nodes.$master.jvm.buffer_pools.direct.count: 0 } - - gte: { nodes.$master.jvm.buffer_pools.direct.used_in_bytes: 0 } + - is_true: nodes.$node_id.name + - is_true: nodes.$node_id.indices + - is_true: nodes.$node_id.indices.docs + - gte: { nodes.$node_id.indices.docs.count: 0 } + - is_true: nodes.$node_id.indices.segments + - gte: { nodes.$node_id.indices.segments.count: 0 } + - is_true: nodes.$node_id.jvm + - is_true: nodes.$node_id.jvm.threads + - gte: { nodes.$node_id.jvm.threads.count: 0 } + - is_true: nodes.$node_id.jvm.buffer_pools.direct + - gte: { nodes.$node_id.jvm.buffer_pools.direct.count: 0 } + - gte: { nodes.$node_id.jvm.buffer_pools.direct.used_in_bytes: 0 } # Nodes Stats with only "cluster_name" field - do: @@ -32,9 +33,9 @@ - is_true: cluster_name - is_false: nodes - - is_false: nodes.$master.name - - is_false: nodes.$master.indices - - is_false: nodes.$master.jvm + - is_false: nodes.$node_id.name + - is_false: nodes.$node_id.indices + - is_false: nodes.$node_id.jvm # Nodes Stats with "nodes" field and sub-fields - do: @@ -43,18 +44,18 @@ - is_false: cluster_name - is_true: nodes - - is_true: nodes.$master.name - - is_true: nodes.$master.indices - - is_true: nodes.$master.indices.docs - - gte: { nodes.$master.indices.docs.count: 0 } - - is_true: nodes.$master.indices.segments - - gte: { nodes.$master.indices.segments.count: 0 } - - is_true: nodes.$master.jvm - - is_true: nodes.$master.jvm.threads - - gte: { nodes.$master.jvm.threads.count: 0 } - - is_true: nodes.$master.jvm.buffer_pools.direct - - gte: { nodes.$master.jvm.buffer_pools.direct.count: 0 } - - gte: { nodes.$master.jvm.buffer_pools.direct.used_in_bytes: 0 } + - is_true: nodes.$node_id.name + - is_true: nodes.$node_id.indices + - is_true: nodes.$node_id.indices.docs + - gte: { nodes.$node_id.indices.docs.count: 0 } + - is_true: nodes.$node_id.indices.segments + - gte: { nodes.$node_id.indices.segments.count: 0 } + - is_true: nodes.$node_id.jvm + - is_true: nodes.$node_id.jvm.threads + - gte: { nodes.$node_id.jvm.threads.count: 0 } + - is_true: nodes.$node_id.jvm.buffer_pools.direct + - gte: { nodes.$node_id.jvm.buffer_pools.direct.count: 0 } + - gte: { nodes.$node_id.jvm.buffer_pools.direct.used_in_bytes: 0 } # Nodes Stats with "nodes.*.indices" field and sub-fields - do: @@ -63,13 +64,13 @@ - is_false: cluster_name - is_true: nodes - - is_false: nodes.$master.name - - is_true: nodes.$master.indices - - is_true: nodes.$master.indices.docs - - gte: { nodes.$master.indices.docs.count: 0 } - - is_true: nodes.$master.indices.segments - 
- gte: { nodes.$master.indices.segments.count: 0 } - - is_false: nodes.$master.jvm + - is_false: nodes.$node_id.name + - is_true: nodes.$node_id.indices + - is_true: nodes.$node_id.indices.docs + - gte: { nodes.$node_id.indices.docs.count: 0 } + - is_true: nodes.$node_id.indices.segments + - gte: { nodes.$node_id.indices.segments.count: 0 } + - is_false: nodes.$node_id.jvm # Nodes Stats with "nodes.*.name" and "nodes.*.indices.docs.count" fields - do: @@ -78,12 +79,12 @@ - is_false: cluster_name - is_true: nodes - - is_true: nodes.$master.name - - is_true: nodes.$master.indices - - is_true: nodes.$master.indices.docs - - gte: { nodes.$master.indices.docs.count: 0 } - - is_false: nodes.$master.indices.segments - - is_false: nodes.$master.jvm + - is_true: nodes.$node_id.name + - is_true: nodes.$node_id.indices + - is_true: nodes.$node_id.indices.docs + - gte: { nodes.$node_id.indices.docs.count: 0 } + - is_false: nodes.$node_id.indices.segments + - is_false: nodes.$node_id.jvm # Nodes Stats with all "count" fields - do: @@ -92,18 +93,18 @@ - is_false: cluster_name - is_true: nodes - - is_false: nodes.$master.name - - is_true: nodes.$master.indices - - is_true: nodes.$master.indices.docs - - gte: { nodes.$master.indices.docs.count: 0 } - - is_true: nodes.$master.indices.segments - - gte: { nodes.$master.indices.segments.count: 0 } - - is_true: nodes.$master.jvm - - is_true: nodes.$master.jvm.threads - - gte: { nodes.$master.jvm.threads.count: 0 } - - is_true: nodes.$master.jvm.buffer_pools.direct - - gte: { nodes.$master.jvm.buffer_pools.direct.count: 0 } - - is_false: nodes.$master.jvm.buffer_pools.direct.used_in_bytes + - is_false: nodes.$node_id.name + - is_true: nodes.$node_id.indices + - is_true: nodes.$node_id.indices.docs + - gte: { nodes.$node_id.indices.docs.count: 0 } + - is_true: nodes.$node_id.indices.segments + - gte: { nodes.$node_id.indices.segments.count: 0 } + - is_true: nodes.$node_id.jvm + - is_true: nodes.$node_id.jvm.threads + - gte: { nodes.$node_id.jvm.threads.count: 0 } + - is_true: nodes.$node_id.jvm.buffer_pools.direct + - gte: { nodes.$node_id.jvm.buffer_pools.direct.count: 0 } + - is_false: nodes.$node_id.jvm.buffer_pools.direct.used_in_bytes # Nodes Stats with all "count" fields in sub-fields of "jvm" field - do: @@ -112,16 +113,16 @@ - is_false: cluster_name - is_true: nodes - - is_false: nodes.$master.name - - is_false: nodes.$master.indices - - is_false: nodes.$master.indices.docs.count - - is_false: nodes.$master.indices.segments.count - - is_true: nodes.$master.jvm - - is_true: nodes.$master.jvm.threads - - gte: { nodes.$master.jvm.threads.count: 0 } - - is_true: nodes.$master.jvm.buffer_pools.direct - - gte: { nodes.$master.jvm.buffer_pools.direct.count: 0 } - - is_false: nodes.$master.jvm.buffer_pools.direct.used_in_bytes + - is_false: nodes.$node_id.name + - is_false: nodes.$node_id.indices + - is_false: nodes.$node_id.indices.docs.count + - is_false: nodes.$node_id.indices.segments.count + - is_true: nodes.$node_id.jvm + - is_true: nodes.$node_id.jvm.threads + - gte: { nodes.$node_id.jvm.threads.count: 0 } + - is_true: nodes.$node_id.jvm.buffer_pools.direct + - gte: { nodes.$node_id.jvm.buffer_pools.direct.count: 0 } + - is_false: nodes.$node_id.jvm.buffer_pools.direct.used_in_bytes # Nodes Stats with "nodes.*.fs.data" fields - do: @@ -130,13 +131,13 @@ - is_false: cluster_name - is_true: nodes - - is_false: nodes.$master.name - - is_false: nodes.$master.indices - - is_false: nodes.$master.jvm - - is_true: nodes.$master.fs.data - - is_true: 
nodes.$master.fs.data.0.path - - is_true: nodes.$master.fs.data.0.type - - is_true: nodes.$master.fs.data.0.total_in_bytes + - is_false: nodes.$node_id.name + - is_false: nodes.$node_id.indices + - is_false: nodes.$node_id.jvm + - is_true: nodes.$node_id.fs.data + - is_true: nodes.$node_id.fs.data.0.path + - is_true: nodes.$node_id.fs.data.0.type + - is_true: nodes.$node_id.fs.data.0.total_in_bytes # Nodes Stats with "nodes.*.fs.data.t*" fields - do: @@ -145,21 +146,22 @@ - is_false: cluster_name - is_true: nodes - - is_false: nodes.$master.name - - is_false: nodes.$master.indices - - is_false: nodes.$master.jvm - - is_true: nodes.$master.fs.data - - is_false: nodes.$master.fs.data.0.path - - is_true: nodes.$master.fs.data.0.type - - is_true: nodes.$master.fs.data.0.total_in_bytes + - is_false: nodes.$node_id.name + - is_false: nodes.$node_id.indices + - is_false: nodes.$node_id.jvm + - is_true: nodes.$node_id.fs.data + - is_false: nodes.$node_id.fs.data.0.path + - is_true: nodes.$node_id.fs.data.0.type + - is_true: nodes.$node_id.fs.data.0.total_in_bytes --- "Nodes Stats filtered using both includes and excludes filters": + - skip: + features: [arbitrary_key] - do: - cluster.state: {} - - # Get master node id - - set: { master_node: master } + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id # Nodes Stats with "nodes" field but no JVM stats - do: @@ -168,10 +170,10 @@ - is_false: cluster_name - is_true: nodes - - is_true: nodes.$master.name - - is_true: nodes.$master.os - - is_false: nodes.$master.indices - - is_false: nodes.$master.jvm + - is_true: nodes.$node_id.name + - is_true: nodes.$node_id.os + - is_false: nodes.$node_id.indices + - is_false: nodes.$node_id.jvm # Nodes Stats with "nodes.*.indices" field and sub-fields but no indices segments - do: @@ -180,10 +182,10 @@ - is_false: cluster_name - is_true: nodes - - is_false: nodes.$master.name - - is_true: nodes.$master.indices - - is_true: nodes.$master.indices.docs - - is_false: nodes.$master.indices.segments + - is_false: nodes.$node_id.name + - is_true: nodes.$node_id.indices + - is_true: nodes.$node_id.indices.docs + - is_false: nodes.$node_id.indices.segments # Nodes Stats with "nodes.*.fs.data.t*" fields but no "type" field - do: @@ -192,9 +194,9 @@ - is_false: cluster_name - is_true: nodes - - is_false: nodes.$master.name - - is_false: nodes.$master.indices - - is_false: nodes.$master.jvm - - is_true: nodes.$master.fs.data - - is_false: nodes.$master.fs.data.0.type - - is_true: nodes.$master.fs.data.0.total_in_bytes + - is_false: nodes.$node_id.name + - is_false: nodes.$node_id.indices + - is_false: nodes.$node_id.jvm + - is_true: nodes.$node_id.fs.data + - is_false: nodes.$node_id.fs.data.0.type + - is_true: nodes.$node_id.fs.data.0.total_in_bytes diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/30_discovery.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/30_discovery.yml index ad8058876ae49..a6b7f29a183c8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/30_discovery.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/nodes.stats/30_discovery.yml @@ -1,13 +1,13 @@ --- "Discovery stats": - skip: - version: " - 6.0.99" - reason: "published_cluster_states_received arrived in 6.1.0" - - do: - cluster.state: {} + features: [arbitrary_key] - # Get master node id - - set: { master_node: master } + - do: + nodes.info: + node_id: _master + - set: + nodes._arbitrary_key_: master - do: nodes.stats: diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/test/search/310_match_bool_prefix.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/310_match_bool_prefix.yml new file mode 100644 index 0000000000000..bcc28c7853425 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/310_match_bool_prefix.yml @@ -0,0 +1,363 @@ +setup: + - skip: + version: " - 7.0.99" + reason: "added in 7.1.0" + + - do: + indices.create: + index: test + body: + mappings: + properties: + my_field1: + type: text + my_field2: + type: text + + - do: + index: + index: test + id: 1 + body: + my_field1: "brown fox jump" + my_field2: "xylophone" + + - do: + index: + index: test + id: 2 + body: + my_field1: "brown emu jump" + my_field2: "xylophone" + + - do: + index: + index: test + id: 3 + body: + my_field1: "jumparound" + my_field2: "emu" + + - do: + index: + index: test + id: 4 + body: + my_field1: "dog" + my_field2: "brown fox jump lazy" + + - do: + indices.refresh: {} + +--- +"scoring complete term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + my_field1: "brown fox jump" + + - match: { hits.total: 3 } + - match: { hits.hits.0._source.my_field1: "brown fox jump" } + - match: { hits.hits.1._source.my_field1: "brown emu jump" } + - match: { hits.hits.2._source.my_field1: "jumparound" } + +--- +"scoring partial term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + my_field1: "brown fox ju" + + - match: { hits.total: 3 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.0._source.my_field1: "brown fox jump" } + - match: { hits.hits.1._id: "2" } + - match: { hits.hits.1._source.my_field1: "brown emu jump" } + - match: { hits.hits.2._id: "3" } + - match: { hits.hits.2._source.my_field1: "jumparound" } + +--- +"minimum should match": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + my_field1: + query: "brown fox jump" + minimum_should_match: 3 + + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.0._source.my_field1: "brown fox jump" } + +--- +"analyzer": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + my_field1: + query: "BROWN dog" + analyzer: whitespace # this analyzer doesn't lowercase terms + + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "4" } + - match: { hits.hits.0._source.my_field1: "dog" } + +--- +"operator": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + my_field1: + query: "brown fox jump" + operator: AND + + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.0._source.my_field1: "brown fox jump" } + +--- +"fuzziness": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + match_bool_prefix: + my_field2: + query: "xylophoen foo" + fuzziness: 1 + prefix_length: 1 + max_expansions: 10 + fuzzy_transpositions: true + fuzzy_rewrite: constant_score + + - match: { hits.total: 2 } + - match: { hits.hits.0._source.my_field2: "xylophone" } + - match: { hits.hits.1._source.my_field2: "xylophone" } + +--- +"multi_match single field complete term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox jump" + type: bool_prefix + fields: [ "my_field1" ] + + - match: { hits.total: 3 } + - match: { hits.hits.0._id: "1" } + - match: 
{ hits.hits.0._source.my_field1: "brown fox jump" } + - match: { hits.hits.1._id: "2" } + - match: { hits.hits.1._source.my_field1: "brown emu jump" } + - match: { hits.hits.2._id: "3" } + - match: { hits.hits.2._source.my_field1: "jumparound" } + +--- +"multi_match single field partial term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox ju" + type: bool_prefix + fields: [ "my_field1" ] + + - match: { hits.total: 3 } + - match: { hits.hits.0._id: "1" } + - match: { hits.hits.0._source.my_field1: "brown fox jump" } + - match: { hits.hits.1._id: "2" } + - match: { hits.hits.1._source.my_field1: "brown emu jump" } + - match: { hits.hits.2._id: "3" } + - match: { hits.hits.2._source.my_field1: "jumparound" } + +--- +"multi_match multiple fields complete term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox jump lazy" + type: bool_prefix + fields: [ "my_field1", "my_field2" ] + + - match: { hits.total: 3 } + - match: { hits.hits.0._id: "4" } + - match: { hits.hits.0._source.my_field1: "dog" } + - match: { hits.hits.0._source.my_field2: "brown fox jump lazy" } + - match: { hits.hits.1._id: "1" } + - match: { hits.hits.1._source.my_field1: "brown fox jump" } + - match: { hits.hits.2._id: "2" } + - match: { hits.hits.2._source.my_field1: "brown emu jump" } + +--- +"multi_match multiple fields partial term": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox jump laz" + type: bool_prefix + fields: [ "my_field1", "my_field2" ] + + - match: { hits.total: 3 } + - match: { hits.hits.0._id: "4" } + - match: { hits.hits.0._source.my_field1: "dog" } + - match: { hits.hits.0._source.my_field2: "brown fox jump lazy" } + - match: { hits.hits.1._id: "1" } + - match: { hits.hits.1._source.my_field1: "brown fox jump" } + - match: { hits.hits.2._id: "2" } + - match: { hits.hits.2._source.my_field1: "brown emu jump" } + +--- +"multi_match multiple fields with analyzer": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "BROWN FOX JUMP dog" + type: bool_prefix + fields: [ "my_field1", "my_field2" ] + analyzer: whitespace # this analyzer doesn't lowercase terms + + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "4" } + - match: { hits.hits.0._source.my_field1: "dog" } + - match: { hits.hits.0._source.my_field2: "brown fox jump lazy" } + +--- +"multi_match multiple fields with minimum_should_match": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown fox jump la" + type: bool_prefix + fields: [ "my_field1", "my_field2" ] + minimum_should_match: 4 + + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "4" } + - match: { hits.hits.0._source.my_field1: "dog" } + - match: { hits.hits.0._source.my_field2: "brown fox jump lazy" } + +--- +"multi_match multiple fields with fuzziness": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "dob nomatch" + type: bool_prefix + fields: [ "my_field1", "my_field2" ] + fuzziness: 1 + + - match: { hits.total: 1 } + - match: { hits.hits.0._id: "4" } + - match: { hits.hits.0._source.my_field1: "dog" } + - match: { hits.hits.0._source.my_field2: "brown fox jump lazy" } + +--- +"multi_match multiple fields with boost": + + - do: + search: + rest_total_hits_as_int: true + index: test + body: + query: + 
multi_match: + query: "brown emu" + type: bool_prefix + fields: [ "my_field1", "my_field2^10" ] + fuzziness: 1 + + - match: { hits.hits.0._id: "3" } + - match: { hits.hits.0._source.my_field2: "emu" } + +--- +"multi_match multiple fields with slop throws exception": + + - do: + catch: /\[slop\] not allowed for type \[bool_prefix\]/ + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown" + type: bool_prefix + fields: [ "my_field1", "my_field2" ] + slop: 1 + +--- +"multi_match multiple fields with cutoff_frequency throws exception": + + - do: + catch: /\[cutoff_frequency\] not allowed for type \[bool_prefix\]/ + search: + rest_total_hits_as_int: true + index: test + body: + query: + multi_match: + query: "brown" + type: bool_prefix + fields: [ "my_field1", "my_field2" ] + cutoff_frequency: 0.001 diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.get/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.get/10_basic.yml index caf97b302f132..addeb3226c575 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.get/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.get/10_basic.yml @@ -1,9 +1,6 @@ --- "get task test": # Note that this gets much better testing in reindex's tests because it actually saves the task - - do: - cluster.state: {} - - do: catch: missing tasks.get: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.list/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.list/10_basic.yml index 4fdfc378bee26..1742134af2b75 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.list/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/tasks.list/10_basic.yml @@ -1,16 +1,18 @@ --- "tasks_list test": - - do: - cluster.state: {} + - skip: + features: [arbitrary_key] - # Get master node id - - set: { master_node: master } + - do: + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id - do: tasks.list: {} - is_true: nodes - - is_true: nodes.$master.roles + - is_true: nodes.$node_id.roles - do: tasks.list: diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 83e1e01614435..2cfe66372115f 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -289,6 +289,7 @@ import org.elasticsearch.rest.action.cat.RestAliasAction; import org.elasticsearch.rest.action.cat.RestAllocationAction; import org.elasticsearch.rest.action.cat.RestCatAction; +import org.elasticsearch.rest.action.cat.RestCatRecoveryAction; import org.elasticsearch.rest.action.cat.RestFielddataAction; import org.elasticsearch.rest.action.cat.RestHealthAction; import org.elasticsearch.rest.action.cat.RestIndicesAction; @@ -665,7 +666,7 @@ public void initRestHandlers(Supplier nodesInCluster) { // Fully qualified to prevent interference with rest.action.count.RestCountAction registerHandler.accept(new org.elasticsearch.rest.action.cat.RestCountAction(settings, restController)); // Fully qualified to prevent interference with rest.action.indices.RestRecoveryAction - registerHandler.accept(new org.elasticsearch.rest.action.cat.RestRecoveryAction(settings, restController)); + registerHandler.accept(new RestCatRecoveryAction(settings, restController)); registerHandler.accept(new RestHealthAction(settings, restController)); registerHandler.accept(new 
org.elasticsearch.rest.action.cat.RestPendingClusterTasksAction(settings, restController)); registerHandler.accept(new RestAliasAction(settings, restController)); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportSingleItemBulkWriteAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportSingleItemBulkWriteAction.java index 892daae4bb275..cc97b6237e30c 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportSingleItemBulkWriteAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportSingleItemBulkWriteAction.java @@ -23,19 +23,12 @@ import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.WriteResponse; import org.elasticsearch.action.support.replication.ReplicatedWriteRequest; import org.elasticsearch.action.support.replication.ReplicationResponse; -import org.elasticsearch.action.support.replication.TransportWriteAction; -import org.elasticsearch.cluster.action.shard.ShardStateAction; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.indices.IndicesService; import org.elasticsearch.tasks.Task; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import java.util.function.Supplier; @@ -45,68 +38,21 @@ public abstract class TransportSingleItemBulkWriteAction< Request extends ReplicatedWriteRequest, Response extends ReplicationResponse & WriteResponse - > extends TransportWriteAction { + > extends HandledTransportAction { private final TransportBulkAction bulkAction; - private final TransportShardBulkAction shardBulkAction; - - protected TransportSingleItemBulkWriteAction(Settings settings, String actionName, TransportService transportService, - ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, - ShardStateAction shardStateAction, ActionFilters actionFilters, - IndexNameExpressionResolver indexNameExpressionResolver, Supplier request, - Supplier replicaRequest, String executor, - TransportBulkAction bulkAction, TransportShardBulkAction shardBulkAction) { - super(settings, actionName, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, - indexNameExpressionResolver, request, replicaRequest, executor); + protected TransportSingleItemBulkWriteAction(String actionName, TransportService transportService, ActionFilters actionFilters, + Supplier request, TransportBulkAction bulkAction) { + super(actionName, transportService, actionFilters, request); this.bulkAction = bulkAction; - this.shardBulkAction = shardBulkAction; } - @Override protected void doExecute(Task task, final Request request, final ActionListener listener) { bulkAction.execute(task, toSingleItemBulkRequest(request), wrapBulkResponse(listener)); } - @Override - protected WritePrimaryResult shardOperationOnPrimary( - Request request, final IndexShard primary) throws Exception { - BulkItemRequest[] itemRequests = new BulkItemRequest[1]; - WriteRequest.RefreshPolicy refreshPolicy = request.getRefreshPolicy(); - request.setRefreshPolicy(WriteRequest.RefreshPolicy.NONE); - 
itemRequests[0] = new BulkItemRequest(0, ((DocWriteRequest) request)); - BulkShardRequest bulkShardRequest = new BulkShardRequest(request.shardId(), refreshPolicy, itemRequests); - WritePrimaryResult bulkResult = - shardBulkAction.shardOperationOnPrimary(bulkShardRequest, primary); - assert bulkResult.finalResponseIfSuccessful.getResponses().length == 1 : "expected only one bulk shard response"; - BulkItemResponse itemResponse = bulkResult.finalResponseIfSuccessful.getResponses()[0]; - final Response response; - final Exception failure; - if (itemResponse.isFailed()) { - failure = itemResponse.getFailure().getCause(); - response = null; - } else { - response = (Response) itemResponse.getResponse(); - failure = null; - } - return new WritePrimaryResult<>(request, response, bulkResult.location, failure, primary, logger); - } - - @Override - protected WriteReplicaResult shardOperationOnReplica( - Request replicaRequest, IndexShard replica) throws Exception { - BulkItemRequest[] itemRequests = new BulkItemRequest[1]; - WriteRequest.RefreshPolicy refreshPolicy = replicaRequest.getRefreshPolicy(); - itemRequests[0] = new BulkItemRequest(0, ((DocWriteRequest) replicaRequest)); - BulkShardRequest bulkShardRequest = new BulkShardRequest(replicaRequest.shardId(), refreshPolicy, itemRequests); - WriteReplicaResult result = shardBulkAction.shardOperationOnReplica(bulkShardRequest, replica); - // a replica operation can never throw a document-level failure, - // as the same document has been already indexed successfully in the primary - return new WriteReplicaResult<>(replicaRequest, result.location, null, replica, logger); - } - - public static ActionListener wrapBulkResponse(ActionListener listener) { return ActionListener.wrap(bulkItemResponses -> { diff --git a/server/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java b/server/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java index 32c599a9f5804..5b85f2f908516 100644 --- a/server/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java +++ b/server/src/main/java/org/elasticsearch/action/delete/TransportDeleteAction.java @@ -20,16 +20,9 @@ package org.elasticsearch.action.delete; import org.elasticsearch.action.bulk.TransportBulkAction; -import org.elasticsearch.action.bulk.TransportShardBulkAction; import org.elasticsearch.action.bulk.TransportSingleItemBulkWriteAction; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.cluster.action.shard.ShardStateAction; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; /** @@ -41,17 +34,7 @@ public class TransportDeleteAction extends TransportSingleItemBulkWriteAction { @Inject - public TransportDeleteAction(Settings settings, TransportService transportService, ClusterService clusterService, - IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, - ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - TransportBulkAction bulkAction, TransportShardBulkAction shardBulkAction) { - super(settings, DeleteAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, - actionFilters, 
indexNameExpressionResolver, DeleteRequest::new, DeleteRequest::new, ThreadPool.Names.WRITE, - bulkAction, shardBulkAction); - } - - @Override - protected DeleteResponse newResponseInstance() { - return new DeleteResponse(); + public TransportDeleteAction(TransportService transportService, ActionFilters actionFilters, TransportBulkAction bulkAction) { + super(DeleteAction.NAME, transportService, actionFilters, DeleteRequest::new, bulkAction); } } diff --git a/server/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/server/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java index 8480c7be3bb6a..b8e3b9b89b3b4 100644 --- a/server/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java @@ -20,16 +20,9 @@ package org.elasticsearch.action.index; import org.elasticsearch.action.bulk.TransportBulkAction; -import org.elasticsearch.action.bulk.TransportShardBulkAction; import org.elasticsearch.action.bulk.TransportSingleItemBulkWriteAction; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.cluster.action.shard.ShardStateAction; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; /** @@ -48,18 +41,7 @@ public class TransportIndexAction extends TransportSingleItemBulkWriteAction { @Inject - public TransportIndexAction(Settings settings, TransportService transportService, ClusterService clusterService, - IndicesService indicesService, - ThreadPool threadPool, ShardStateAction shardStateAction, - ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, - TransportBulkAction bulkAction, TransportShardBulkAction shardBulkAction) { - super(settings, IndexAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, - actionFilters, indexNameExpressionResolver, IndexRequest::new, IndexRequest::new, ThreadPool.Names.WRITE, - bulkAction, shardBulkAction); - } - - @Override - protected IndexResponse newResponseInstance() { - return new IndexResponse(); + public TransportIndexAction(ActionFilters actionFilters, TransportService transportService, TransportBulkAction bulkAction) { + super(IndexAction.NAME, transportService, actionFilters, IndexRequest::new, bulkAction); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java index f54f101041d1b..0125084c37099 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchPhaseController.java @@ -486,7 +486,7 @@ private ReducedQueryPhase reducedQueryPhase(Collection listener) { + assert request.shardId() != null : "request shardId must be set"; new ReroutePhase((ReplicationTask) task, request, listener).run(); } @@ -779,7 +780,6 @@ protected void doRun() { // resolve all derived request fields, so we can route and apply it resolveRequest(indexMetaData, request); - assert request.shardId() != null : "request shardId must be set in resolveRequest"; assert request.waitForActiveShards() != 
ActiveShardCount.DEFAULT : "request waitForActiveShards must be set in resolveRequest"; diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java index c4643771fb790..7271013cb363d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java @@ -29,10 +29,13 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.discovery.DiscoveryModule; +import org.elasticsearch.node.Node; import org.elasticsearch.threadpool.ThreadPool.Names; import org.elasticsearch.transport.TransportService; import java.util.ArrayList; +import java.util.Collections; import java.util.HashSet; import java.util.LinkedHashSet; import java.util.List; @@ -75,15 +78,28 @@ public class ClusterBootstrapService { public ClusterBootstrapService(Settings settings, TransportService transportService, Supplier> discoveredNodesSupplier, BooleanSupplier isBootstrappedSupplier, Consumer votingConfigurationConsumer) { - - final List initialMasterNodes = INITIAL_MASTER_NODES_SETTING.get(settings); - bootstrapRequirements = unmodifiableSet(new LinkedHashSet<>(initialMasterNodes)); - if (bootstrapRequirements.size() != initialMasterNodes.size()) { - throw new IllegalArgumentException( - "setting [" + INITIAL_MASTER_NODES_SETTING.getKey() + "] contains duplicates: " + initialMasterNodes); + if (DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE.equals(DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings))) { + if (INITIAL_MASTER_NODES_SETTING.exists(settings)) { + throw new IllegalArgumentException("setting [" + INITIAL_MASTER_NODES_SETTING.getKey() + + "] is not allowed when [" + DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey() + "] is set to [" + + DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE + "]"); + } + if (DiscoveryNode.isMasterNode(settings) == false) { + throw new IllegalArgumentException("node with [" + DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey() + "] set to [" + + DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE + "] must be master-eligible"); + } + bootstrapRequirements = Collections.singleton(Node.NODE_NAME_SETTING.get(settings)); + unconfiguredBootstrapTimeout = null; + } else { + final List initialMasterNodes = INITIAL_MASTER_NODES_SETTING.get(settings); + bootstrapRequirements = unmodifiableSet(new LinkedHashSet<>(initialMasterNodes)); + if (bootstrapRequirements.size() != initialMasterNodes.size()) { + throw new IllegalArgumentException( + "setting [" + INITIAL_MASTER_NODES_SETTING.getKey() + "] contains duplicates: " + initialMasterNodes); + } + unconfiguredBootstrapTimeout = discoveryIsConfigured(settings) ? null : UNCONFIGURED_BOOTSTRAP_TIMEOUT_SETTING.get(settings); } - unconfiguredBootstrapTimeout = discoveryIsConfigured(settings) ? 
null : UNCONFIGURED_BOOTSTRAP_TIMEOUT_SETTING.get(settings); this.transportService = transportService; this.discoveredNodesSupplier = discoveredNodesSupplier; this.isBootstrappedSupplier = isBootstrappedSupplier; diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index 16bcab3cd3f22..154f4ab162d71 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -51,12 +51,14 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ListenableFuture; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.discovery.Discovery; +import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoveryStats; import org.elasticsearch.discovery.HandshakingTransportAddressConnector; import org.elasticsearch.discovery.PeerFinder; @@ -69,6 +71,7 @@ import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Optional; @@ -94,6 +97,7 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery TimeValue.timeValueMillis(30000), TimeValue.timeValueMillis(1), Setting.Property.NodeScope); private final Settings settings; + private final boolean singleNodeDiscovery; private final TransportService transportService; private final MasterService masterService; private final AllocationService allocationService; @@ -143,6 +147,7 @@ public Coordinator(String nodeName, Settings settings, ClusterSettings clusterSe this.masterService = masterService; this.allocationService = allocationService; this.onJoinValidators = JoinTaskExecutor.addBuiltInJoinValidators(onJoinValidators); + this.singleNodeDiscovery = DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE.equals(DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings)); this.joinHelper = new JoinHelper(settings, allocationService, masterService, transportService, this::getCurrentTerm, this::getStateForMasterService, this::handleJoinRequest, this::joinLeaderInTerm, this.onJoinValidators); this.persistedStateSupplier = persistedStateSupplier; @@ -424,6 +429,13 @@ private void handleJoinRequest(JoinRequest joinRequest, JoinHelper.JoinCallback assert Thread.holdsLock(mutex) == false; assert getLocalNode().isMasterNode() : getLocalNode() + " received a join but is not master-eligible"; logger.trace("handleJoinRequest: as {}, handling {}", mode, joinRequest); + + if (singleNodeDiscovery && joinRequest.getSourceNode().equals(getLocalNode()) == false) { + joinCallback.onFailure(new IllegalStateException("cannot join node with [" + DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey() + + "] set to [" + DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE + "] discovery")); + return; + } + transportService.connectToNode(joinRequest.getSourceNode()); final ClusterState stateForJoinValidation = getStateForMasterService(); @@ -636,6 +648,14 @@ protected void doStart() { coordinationState.set(new CoordinationState(settings, getLocalNode(), 
persistedState)); peerFinder.setCurrentTerm(getCurrentTerm()); configuredHostsResolver.start(); + VotingConfiguration votingConfiguration = coordinationState.get().getLastAcceptedState().getLastCommittedConfiguration(); + if (singleNodeDiscovery && + votingConfiguration.isEmpty() == false && + votingConfiguration.hasQuorum(Collections.singleton(getLocalNode().getId())) == false) { + throw new IllegalStateException("cannot start with [" + DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey() + "] set to [" + + DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE + "] when local node " + getLocalNode() + + " does not have quorum in voting configuration " + votingConfiguration); + } ClusterState initialState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings)) .blocks(ClusterBlocks.builder() .addGlobalBlock(STATE_NOT_RECOVERED_BLOCK) @@ -1049,7 +1069,8 @@ private class CoordinatorPeerFinder extends PeerFinder { CoordinatorPeerFinder(Settings settings, TransportService transportService, TransportAddressConnector transportAddressConnector, ConfiguredHostsResolver configuredHostsResolver) { - super(settings, transportService, transportAddressConnector, configuredHostsResolver); + super(settings, transportService, transportAddressConnector, + singleNodeDiscovery ? hostsResolver -> Collections.emptyList() : configuredHostsResolver); } @Override @@ -1060,6 +1081,13 @@ protected void onActiveMasterFound(DiscoveryNode masterNode, long term) { } } + @Override + protected void startProbe(TransportAddress transportAddress) { + if (singleNodeDiscovery == false) { + super.startProbe(transportAddress); + } + } + @Override protected void onFoundPeersUpdated() { synchronized (mutex) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java index 050d97ba54cf0..03fa790a87175 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolver.java @@ -157,10 +157,19 @@ Index[] concreteIndices(Context context, String... indexExpressions) { for (ExpressionResolver expressionResolver : expressionResolvers) { expressions = expressionResolver.resolve(context, expressions); } - + if (expressions.isEmpty()) { if (!options.allowNoIndices()) { - IndexNotFoundException infe = new IndexNotFoundException((String)null); + IndexNotFoundException infe; + if (indexExpressions.length == 1) { + if (indexExpressions[0].equals(MetaData.ALL)) { + infe = new IndexNotFoundException("no indices exist", (String)null); + } else { + infe = new IndexNotFoundException((String)null); + } + } else { + infe = new IndexNotFoundException((String)null); + } infe.setResources("index_expression", indexExpressions); throw infe; } else { @@ -173,7 +182,12 @@ Index[] concreteIndices(Context context, String... 
indexExpressions) { AliasOrIndex aliasOrIndex = metaData.getAliasAndIndexLookup().get(expression); if (aliasOrIndex == null ) { if (failNoIndices) { - IndexNotFoundException infe = new IndexNotFoundException(expression); + IndexNotFoundException infe; + if (expression.equals(MetaData.ALL)) { + infe = new IndexNotFoundException("no indices exist", expression); + } else { + infe = new IndexNotFoundException(expression); + } infe.setResources("index_expression", expression); throw infe; } else { diff --git a/server/src/main/java/org/elasticsearch/common/Numbers.java b/server/src/main/java/org/elasticsearch/common/Numbers.java index 27c1dd18e97b8..51aecb5e19c9c 100644 --- a/server/src/main/java/org/elasticsearch/common/Numbers.java +++ b/server/src/main/java/org/elasticsearch/common/Numbers.java @@ -125,6 +125,10 @@ public static long toLongExact(Number n) { } } + // weak bounds on the BigDecimal representation to allow for coercion + private static BigDecimal BIGDECIMAL_GREATER_THAN_LONG_MAX_VALUE = BigDecimal.valueOf(Long.MAX_VALUE).add(BigDecimal.ONE); + private static BigDecimal BIGDECIMAL_LESS_THAN_LONG_MIN_VALUE = BigDecimal.valueOf(Long.MIN_VALUE).subtract(BigDecimal.ONE); + /** Return the long that {@code stringValue} stores or throws an exception if the * stored value cannot be converted to a long that stores the exact same * value and {@code coerce} is false. */ @@ -138,6 +142,10 @@ public static long toLong(String stringValue, boolean coerce) { final BigInteger bigIntegerValue; try { BigDecimal bigDecimalValue = new BigDecimal(stringValue); + if (bigDecimalValue.compareTo(BIGDECIMAL_GREATER_THAN_LONG_MAX_VALUE) >= 0 || + bigDecimalValue.compareTo(BIGDECIMAL_LESS_THAN_LONG_MIN_VALUE) <= 0) { + throw new IllegalArgumentException("Value [" + stringValue + "] is out of range for a long"); + } bigIntegerValue = coerce ? 
bigDecimalValue.toBigInteger() : bigDecimalValue.toBigIntegerExact(); } catch (ArithmeticException e) { throw new IllegalArgumentException("Value [" + stringValue + "] has a decimal part"); diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java b/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java index a45667b908d74..6dcaaaa7d6a29 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeoUtils.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser.Token; +import org.elasticsearch.common.xcontent.XContentSubParser; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.fielddata.FieldData; @@ -435,51 +436,52 @@ public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point, fina NumberFormatException numberFormatException = null; if(parser.currentToken() == Token.START_OBJECT) { - while(parser.nextToken() != Token.END_OBJECT) { - if(parser.currentToken() == Token.FIELD_NAME) { - String field = parser.currentName(); - if(LATITUDE.equals(field)) { - parser.nextToken(); - switch (parser.currentToken()) { - case VALUE_NUMBER: - case VALUE_STRING: - try { - lat = parser.doubleValue(true); - } catch (NumberFormatException e) { - numberFormatException = e; - } - break; - default: - throw new ElasticsearchParseException("latitude must be a number"); - } - } else if (LONGITUDE.equals(field)) { - parser.nextToken(); - switch (parser.currentToken()) { - case VALUE_NUMBER: - case VALUE_STRING: - try { - lon = parser.doubleValue(true); - } catch (NumberFormatException e) { - numberFormatException = e; - } - break; - default: - throw new ElasticsearchParseException("longitude must be a number"); - } - } else if (GEOHASH.equals(field)) { - if(parser.nextToken() == Token.VALUE_STRING) { - geohash = parser.text(); + try (XContentSubParser subParser = new XContentSubParser(parser)) { + while (subParser.nextToken() != Token.END_OBJECT) { + if (subParser.currentToken() == Token.FIELD_NAME) { + String field = subParser.currentName(); + if (LATITUDE.equals(field)) { + subParser.nextToken(); + switch (subParser.currentToken()) { + case VALUE_NUMBER: + case VALUE_STRING: + try { + lat = subParser.doubleValue(true); + } catch (NumberFormatException e) { + numberFormatException = e; + } + break; + default: + throw new ElasticsearchParseException("latitude must be a number"); + } + } else if (LONGITUDE.equals(field)) { + subParser.nextToken(); + switch (subParser.currentToken()) { + case VALUE_NUMBER: + case VALUE_STRING: + try { + lon = subParser.doubleValue(true); + } catch (NumberFormatException e) { + numberFormatException = e; + } + break; + default: + throw new ElasticsearchParseException("longitude must be a number"); + } + } else if (GEOHASH.equals(field)) { + if (subParser.nextToken() == Token.VALUE_STRING) { + geohash = subParser.text(); + } else { + throw new ElasticsearchParseException("geohash must be a string"); + } } else { - throw new ElasticsearchParseException("geohash must be a string"); + throw new ElasticsearchParseException("field must be either [{}], [{}] or [{}]", LATITUDE, LONGITUDE, GEOHASH); } } else { - throw new ElasticsearchParseException("field must be either [{}], [{}] or [{}]", LATITUDE, LONGITUDE, GEOHASH); + throw new 
ElasticsearchParseException("token [{}] not allowed", subParser.currentToken()); } - } else { - throw new ElasticsearchParseException("token [{}] not allowed", parser.currentToken()); } } - if (geohash != null) { if(!Double.isNaN(lat) || !Double.isNaN(lon)) { throw new ElasticsearchParseException("field must be either lat/lon or geohash"); @@ -498,19 +500,21 @@ public static GeoPoint parseGeoPoint(XContentParser parser, GeoPoint point, fina } } else if(parser.currentToken() == Token.START_ARRAY) { - int element = 0; - while(parser.nextToken() != Token.END_ARRAY) { - if(parser.currentToken() == Token.VALUE_NUMBER) { - element++; - if(element == 1) { - lon = parser.doubleValue(); - } else if(element == 2) { - lat = parser.doubleValue(); + try (XContentSubParser subParser = new XContentSubParser(parser)) { + int element = 0; + while (subParser.nextToken() != Token.END_ARRAY) { + if (subParser.currentToken() == Token.VALUE_NUMBER) { + element++; + if (element == 1) { + lon = subParser.doubleValue(); + } else if (element == 2) { + lat = subParser.doubleValue(); + } else { + GeoPoint.assertZValue(ignoreZValue, subParser.doubleValue()); + } } else { - GeoPoint.assertZValue(ignoreZValue, parser.doubleValue()); + throw new ElasticsearchParseException("numeric value expected"); } - } else { - throw new ElasticsearchParseException("numeric value expected"); } } return point.reset(lat, lon); diff --git a/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java b/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java index 8e51bc5951d59..960df44a62514 100644 --- a/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java +++ b/server/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java @@ -50,11 +50,24 @@ public float score() { private final ScoreScript.LeafFactory script; + private final int shardId; + private final String indexName; + public ScriptScoreFunction(Script sScript, ScoreScript.LeafFactory script) { super(CombineFunction.REPLACE); this.sScript = sScript; this.script = script; + this.indexName = null; + this.shardId = -1; + } + + public ScriptScoreFunction(Script sScript, ScoreScript.LeafFactory script, String indexName, int shardId) { + super(CombineFunction.REPLACE); + this.sScript = sScript; + this.script = script; + this.indexName = indexName; + this.shardId = shardId; } @Override @@ -62,6 +75,8 @@ public LeafScoreFunction getLeafScoreFunction(LeafReaderContext ctx) throws IOEx final ScoreScript leafScript = script.newInstance(ctx); final CannedScorer scorer = new CannedScorer(); leafScript.setScorer(scorer); + leafScript._setIndexName(indexName); + leafScript._setShard(shardId); return new LeafScoreFunction() { @Override public double score(int docId, float subQueryScore) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java index a8dce661e1c9f..2379b4f00c2bf 100644 --- a/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java +++ b/server/src/main/java/org/elasticsearch/common/time/DateFormatters.java @@ -1585,7 +1585,7 @@ static JavaDateFormatter merge(String pattern, List formatters) { if (printer == null) { printer = javaDateFormatter.getPrinter(); } - dateTimeFormatters.add(javaDateFormatter.getParser()); + dateTimeFormatters.addAll(javaDateFormatter.getParsers()); 
roundupBuilder.appendOptional(javaDateFormatter.getRoundupParser()); } DateTimeFormatter roundUpParser = roundupBuilder.toFormatter(Locale.ROOT); @@ -1632,7 +1632,7 @@ public static ZonedDateTime from(TemporalAccessor accessor) { if (zoneId == null) { zoneId = ZoneOffset.UTC; } - + LocalDate localDate = accessor.query(TemporalQueries.localDate()); LocalTime localTime = accessor.query(TemporalQueries.localTime()); boolean isLocalDateSet = localDate != null; diff --git a/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java b/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java index c3adcc84b5781..d0f4200b3bafe 100644 --- a/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java +++ b/server/src/main/java/org/elasticsearch/common/time/JavaDateFormatter.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.Strings; +import java.text.ParsePosition; import java.time.ZoneId; import java.time.format.DateTimeFormatter; import java.time.format.DateTimeFormatterBuilder; @@ -29,7 +30,10 @@ import java.time.temporal.TemporalAccessor; import java.time.temporal.TemporalField; import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; @@ -39,6 +43,7 @@ class JavaDateFormatter implements DateFormatter { // base fields which should be used for default parsing, when we round up for date math private static final Map ROUND_UP_BASE_FIELDS = new HashMap<>(6); + { ROUND_UP_BASE_FIELDS.put(ChronoField.MONTH_OF_YEAR, 1L); ROUND_UP_BASE_FIELDS.put(ChronoField.DAY_OF_MONTH, 1L); @@ -50,22 +55,15 @@ class JavaDateFormatter implements DateFormatter { private final String format; private final DateTimeFormatter printer; - private final DateTimeFormatter parser; + private final List parsers; private final DateTimeFormatter roundupParser; - private JavaDateFormatter(String format, DateTimeFormatter printer, DateTimeFormatter roundupParser, DateTimeFormatter parser) { - this.format = format; - this.printer = printer; - this.roundupParser = roundupParser; - this.parser = parser; - } - JavaDateFormatter(String format, DateTimeFormatter printer, DateTimeFormatter... parsers) { this(format, printer, builder -> ROUND_UP_BASE_FIELDS.forEach(builder::parseDefaulting), parsers); } JavaDateFormatter(String format, DateTimeFormatter printer, Consumer roundupParserConsumer, - DateTimeFormatter... parsers) { + DateTimeFormatter... 
parsers) { if (printer == null) { throw new IllegalArgumentException("printer may not be null"); } @@ -79,26 +77,21 @@ private JavaDateFormatter(String format, DateTimeFormatter printer, DateTimeForm } this.printer = printer; this.format = format; + if (parsers.length == 0) { - this.parser = printer; - } else if (parsers.length == 1) { - this.parser = parsers[0]; + this.parsers = Collections.singletonList(printer); } else { - DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder(); - for (DateTimeFormatter parser : parsers) { - builder.appendOptional(parser); - } - this.parser = builder.toFormatter(Locale.ROOT); + this.parsers = Arrays.asList(parsers); } DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder(); if (format.contains("||") == false) { - builder.append(this.parser); + builder.append(this.parsers.get(0)); } roundupParserConsumer.accept(builder); - DateTimeFormatter roundupFormatter = builder.toFormatter(parser.getLocale()); + DateTimeFormatter roundupFormatter = builder.toFormatter(locale()); if (printer.getZone() != null) { - roundupFormatter = roundupFormatter.withZone(printer.getZone()); + roundupFormatter = roundupFormatter.withZone(zone()); } this.roundupParser = roundupFormatter; } @@ -107,10 +100,6 @@ DateTimeFormatter getRoundupParser() { return roundupParser; } - DateTimeFormatter getParser() { - return parser; - } - DateTimeFormatter getPrinter() { return printer; } @@ -122,30 +111,64 @@ public TemporalAccessor parse(String input) { } try { - return parser.parse(input); + return doParse(input); } catch (DateTimeParseException e) { throw new IllegalArgumentException("failed to parse date field [" + input + "] with format [" + format + "]", e); } } + /** + * Attempt parsing the input without throwing exception. If multiple parsers are provided, + * it will continue iterating if the previous parser failed. The pattern must fully match, meaning whole input was used. + * This also means that this method depends on DateTimeFormatter.ClassicFormat.parseObject + * which does not throw exceptions when parsing failed. + * + * The approach with collection of parsers was taken because java-time requires ordering on optional (composite) + * patterns. Joda does not suffer from this. + * https://bugs.openjdk.java.net/browse/JDK-8188771 + * + * @param input An arbitrary string resembling the string representation of a date or time + * @return a TemporalAccessor if parsing was successful. 
+ * @throws DateTimeParseException when unable to parse with any parsers + */ + private TemporalAccessor doParse(String input) { + if (parsers.size() > 1) { + for (DateTimeFormatter formatter : parsers) { + ParsePosition pos = new ParsePosition(0); + Object object = formatter.toFormat().parseObject(input, pos); + if (parsingSucceeded(object, input, pos) == true) { + return (TemporalAccessor) object; + } + } + throw new DateTimeParseException("Failed to parse with all enclosed parsers", input, 0); + } + return this.parsers.get(0).parse(input); + } + + private boolean parsingSucceeded(Object object, String input, ParsePosition pos) { + return object != null && pos.getIndex() == input.length(); + } + @Override public DateFormatter withZone(ZoneId zoneId) { // shortcurt to not create new objects unnecessarily - if (zoneId.equals(parser.getZone())) { + if (zoneId.equals(zone())) { return this; } - return new JavaDateFormatter(format, printer.withZone(zoneId), roundupParser.withZone(zoneId), parser.withZone(zoneId)); + return new JavaDateFormatter(format, printer.withZone(zoneId), + parsers.stream().map(p -> p.withZone(zoneId)).toArray(size -> new DateTimeFormatter[size])); } @Override public DateFormatter withLocale(Locale locale) { // shortcurt to not create new objects unnecessarily - if (locale.equals(parser.getLocale())) { + if (locale.equals(locale())) { return this; } - return new JavaDateFormatter(format, printer.withLocale(locale), roundupParser.withLocale(locale), parser.withLocale(locale)); + return new JavaDateFormatter(format, printer.withLocale(locale), + parsers.stream().map(p -> p.withLocale(locale)).toArray(size -> new DateTimeFormatter[size])); } @Override @@ -170,7 +193,7 @@ public ZoneId zone() { @Override public DateMathParser toDateMathParser() { - return new JavaDateMathParser(format, parser, roundupParser); + return new JavaDateMathParser(format, this, getRoundupParser()); } @Override @@ -186,12 +209,16 @@ public boolean equals(Object obj) { JavaDateFormatter other = (JavaDateFormatter) obj; return Objects.equals(format, other.format) && - Objects.equals(locale(), other.locale()) && - Objects.equals(this.printer.getZone(), other.printer.getZone()); + Objects.equals(locale(), other.locale()) && + Objects.equals(this.printer.getZone(), other.printer.getZone()); } @Override public String toString() { return String.format(Locale.ROOT, "format[%s] locale[%s]", format, locale()); } + + Collection getParsers() { + return parsers; + } } diff --git a/server/src/main/java/org/elasticsearch/common/time/JavaDateMathParser.java b/server/src/main/java/org/elasticsearch/common/time/JavaDateMathParser.java index 05e1e75efca39..dc7c195e2fd6c 100644 --- a/server/src/main/java/org/elasticsearch/common/time/JavaDateMathParser.java +++ b/server/src/main/java/org/elasticsearch/common/time/JavaDateMathParser.java @@ -35,6 +35,7 @@ import java.time.temporal.TemporalAdjusters; import java.time.temporal.TemporalQueries; import java.util.Objects; +import java.util.function.Function; import java.util.function.LongSupplier; /** @@ -46,11 +47,11 @@ */ public class JavaDateMathParser implements DateMathParser { - private final DateTimeFormatter formatter; + private final JavaDateFormatter formatter; private final DateTimeFormatter roundUpFormatter; private final String format; - JavaDateMathParser(String format, DateTimeFormatter formatter, DateTimeFormatter roundUpFormatter) { + JavaDateMathParser(String format, JavaDateFormatter formatter, DateTimeFormatter roundUpFormatter) { this.format = format; 
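Editor's note: the doParse method documented above iterates over the individual parsers, using DateTimeFormatter.toFormat().parseObject with a ParsePosition so that a failed attempt does not throw, and accepting a result only when the whole input was consumed. A self-contained sketch of that pattern; the two patterns below are illustrative stand-ins for the "||"-separated formats Elasticsearch builds:

import java.text.ParsePosition;
import java.time.format.DateTimeFormatter;
import java.time.format.DateTimeParseException;
import java.time.temporal.TemporalAccessor;
import java.util.Arrays;
import java.util.List;
import java.util.Locale;

class MultiParserExample {
    // Two alternative patterns; in Elasticsearch these come from a "pattern1||pattern2" format string.
    private static final List<DateTimeFormatter> PARSERS = Arrays.asList(
        DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss", Locale.ROOT),
        DateTimeFormatter.ofPattern("yyyy-MM-dd", Locale.ROOT));

    static TemporalAccessor parse(String input) {
        for (DateTimeFormatter formatter : PARSERS) {
            ParsePosition pos = new ParsePosition(0);
            // parseObject does not throw; on failure it returns null or leaves pos short of the end
            Object parsed = formatter.toFormat().parseObject(input, pos);
            if (parsed != null && pos.getIndex() == input.length()) {
                return (TemporalAccessor) parsed;
            }
        }
        throw new DateTimeParseException("Failed to parse with all enclosed parsers", input, 0);
    }

    public static void main(String[] args) {
        System.out.println(parse("2019-03-28"));           // matched by the second pattern
        System.out.println(parse("2019-03-28T10:15:30"));  // matched by the first pattern
    }
}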
Objects.requireNonNull(formatter); this.formatter = formatter; @@ -215,12 +216,12 @@ private Instant parseDateTime(String value, ZoneId timeZone, boolean roundUpIfNo throw new ElasticsearchParseException("cannot parse empty date"); } - DateTimeFormatter formatter = roundUpIfNoTime ? this.roundUpFormatter : this.formatter; + Function formatter = roundUpIfNoTime ? this.roundUpFormatter::parse : this.formatter::parse; try { if (timeZone == null) { - return DateFormatters.from(formatter.parse(value)).toInstant(); + return DateFormatters.from(formatter.apply(value)).toInstant(); } else { - TemporalAccessor accessor = formatter.parse(value); + TemporalAccessor accessor = formatter.apply(value); ZoneId zoneId = TemporalQueries.zone().queryFrom(accessor); if (zoneId != null) { timeZone = zoneId; @@ -228,7 +229,7 @@ private Instant parseDateTime(String value, ZoneId timeZone, boolean roundUpIfNo return DateFormatters.from(accessor).withZoneSameLocal(timeZone).toInstant(); } - } catch (DateTimeParseException e) { + } catch (IllegalArgumentException | DateTimeParseException e) { throw new ElasticsearchParseException("failed to parse date field [{}] with format [{}]: [{}]", e, value, format, e.getMessage()); } diff --git a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java index ab95f2a430497..a14def8fa86b5 100644 --- a/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java +++ b/server/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java @@ -36,7 +36,6 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.discovery.single.SingleNodeDiscovery; import org.elasticsearch.gateway.GatewayMetaState; import org.elasticsearch.plugins.DiscoveryPlugin; import org.elasticsearch.threadpool.ThreadPool; @@ -50,7 +49,6 @@ import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.Random; import java.util.Set; import java.util.function.BiConsumer; @@ -68,6 +66,8 @@ public class DiscoveryModule { public static final String ZEN2_DISCOVERY_TYPE = "zen"; + public static final String SINGLE_NODE_DISCOVERY_TYPE = "single-node"; + public static final Setting DISCOVERY_TYPE_SETTING = new Setting<>("discovery.type", ZEN2_DISCOVERY_TYPE, Function.identity(), Property.NodeScope); public static final Setting> DISCOVERY_SEED_PROVIDERS_SETTING = @@ -114,6 +114,8 @@ public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportServic List filteredSeedProviders = seedProviderNames.stream() .map(hostProviders::get).map(Supplier::get).collect(Collectors.toList()); + String discoveryType = DISCOVERY_TYPE_SETTING.get(settings); + final SeedHostsProvider seedHostsProvider = hostsResolver -> { final List addresses = new ArrayList<>(); for (SeedHostsProvider provider : filteredSeedProviders) { @@ -122,20 +124,17 @@ public DiscoveryModule(Settings settings, ThreadPool threadPool, TransportServic return Collections.unmodifiableList(addresses); }; - Map> discoveryTypes = new HashMap<>(); - discoveryTypes.put(ZEN2_DISCOVERY_TYPE, () -> new Coordinator(NODE_NAME_SETTING.get(settings), settings, clusterSettings, - transportService, namedWriteableRegistry, allocationService, masterService, - () -> gatewayMetaState.getPersistedState(settings, (ClusterApplierService) clusterApplier), seedHostsProvider, clusterApplier, - 
joinValidators, new Random(Randomness.get().nextLong()))); - discoveryTypes.put("single-node", () -> new SingleNodeDiscovery(settings, transportService, masterService, clusterApplier, - gatewayMetaState)); - String discoveryType = DISCOVERY_TYPE_SETTING.get(settings); - Supplier discoverySupplier = discoveryTypes.get(discoveryType); - if (discoverySupplier == null) { + if (ZEN2_DISCOVERY_TYPE.equals(discoveryType) || SINGLE_NODE_DISCOVERY_TYPE.equals(discoveryType)) { + discovery = new Coordinator(NODE_NAME_SETTING.get(settings), + settings, clusterSettings, + transportService, namedWriteableRegistry, allocationService, masterService, + () -> gatewayMetaState.getPersistedState(settings, (ClusterApplierService) clusterApplier), seedHostsProvider, + clusterApplier, joinValidators, new Random(Randomness.get().nextLong())); + } else { throw new IllegalArgumentException("Unknown discovery type [" + discoveryType + "]"); } + logger.info("using discovery type [{}] and seed hosts providers {}", discoveryType, seedProviderNames); - discovery = Objects.requireNonNull(discoverySupplier.get()); } public Discovery getDiscovery() { diff --git a/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java b/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java index 9bcdd5b544268..f3e52e8df5616 100644 --- a/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java +++ b/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java @@ -287,7 +287,7 @@ public String toString() { return peersRemoved; } - private void startProbe(TransportAddress transportAddress) { + protected void startProbe(TransportAddress transportAddress) { assert holdsLock() : "PeerFinder mutex not held"; if (active == false) { logger.trace("startProbe({}) not running", transportAddress); diff --git a/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java b/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java deleted file mode 100644 index 2a415a74cd0cc..0000000000000 --- a/server/src/main/java/org/elasticsearch/discovery/single/SingleNodeDiscovery.java +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
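Editor's note: the ClusterBootstrapService and DiscoveryModule hunks above fold single-node discovery into the Coordinator and reject contradictory settings up front. A distilled sketch of that validation, using plain parameters as hypothetical stand-ins for the real Settings/Setting plumbing:

class SingleNodeDiscoveryValidation {
    static final String SINGLE_NODE_DISCOVERY_TYPE = "single-node";

    // Hypothetical view of the relevant node settings; Elasticsearch reads these through Setting instances.
    static void validate(String discoveryType, boolean initialMasterNodesConfigured, boolean masterEligible) {
        if (SINGLE_NODE_DISCOVERY_TYPE.equals(discoveryType)) {
            if (initialMasterNodesConfigured) {
                throw new IllegalArgumentException(
                    "setting [cluster.initial_master_nodes] is not allowed when [discovery.type] is set to [single-node]");
            }
            if (masterEligible == false) {
                throw new IllegalArgumentException(
                    "node with [discovery.type] set to [single-node] must be master-eligible");
            }
        }
    }

    public static void main(String[] args) {
        validate("zen", true, true);           // fine: normal discovery with initial master nodes
        validate("single-node", false, true);  // fine: the node bootstraps itself
        validate("single-node", true, true);   // throws: initial_master_nodes is not allowed
    }
}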
- */ - -package org.elasticsearch.discovery.single; - -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlocks; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.service.ClusterApplier; -import org.elasticsearch.cluster.service.ClusterApplier.ClusterApplyListener; -import org.elasticsearch.cluster.service.ClusterApplierService; -import org.elasticsearch.cluster.service.MasterService; -import org.elasticsearch.common.component.AbstractLifecycleComponent; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.discovery.Discovery; -import org.elasticsearch.discovery.DiscoveryStats; -import org.elasticsearch.gateway.GatewayMetaState; -import org.elasticsearch.transport.TransportService; - -import java.util.Objects; - -import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; - -/** - * A discovery implementation where the only member of the cluster is the local node. - */ -public class SingleNodeDiscovery extends AbstractLifecycleComponent implements Discovery { - private static final Logger logger = LogManager.getLogger(SingleNodeDiscovery.class); - - private final ClusterName clusterName; - protected final TransportService transportService; - private final ClusterApplier clusterApplier; - private volatile ClusterState clusterState; - - public SingleNodeDiscovery(final Settings settings, final TransportService transportService, - final MasterService masterService, final ClusterApplier clusterApplier, - final GatewayMetaState gatewayMetaState) { - this.clusterName = ClusterName.CLUSTER_NAME_SETTING.get(settings); - this.transportService = Objects.requireNonNull(transportService); - masterService.setClusterStateSupplier(() -> clusterState); - this.clusterApplier = clusterApplier; - - if (clusterApplier instanceof ClusterApplierService) { - ((ClusterApplierService) clusterApplier).addLowPriorityApplier(gatewayMetaState); - } - } - - @Override - public synchronized void publish(final ClusterChangedEvent event, ActionListener publishListener, - final AckListener ackListener) { - clusterState = event.state(); - ackListener.onCommit(TimeValue.ZERO); - - clusterApplier.onNewClusterState("apply-locally-on-node[" + event.source() + "]", () -> clusterState, new ClusterApplyListener() { - @Override - public void onSuccess(String source) { - publishListener.onResponse(null); - ackListener.onNodeAck(transportService.getLocalNode(), null); - } - - @Override - public void onFailure(String source, Exception e) { - publishListener.onFailure(e); - ackListener.onNodeAck(transportService.getLocalNode(), e); - logger.warn(() -> new ParameterizedMessage("failed while applying cluster state locally [{}]", event.source()), e); - } - }); - } - - @Override - public DiscoveryStats stats() { - return new DiscoveryStats(null, null); - } - - @Override - public synchronized void startInitialJoin() { - if (lifecycle.started() == false) { - throw new IllegalStateException("can't start initial join when not started"); - } - // apply a fresh cluster state just so that state recovery gets triggered by 
GatewayService - // TODO: give discovery module control over GatewayService - clusterState = ClusterState.builder(clusterState).build(); - clusterApplier.onNewClusterState("single-node-start-initial-join", () -> clusterState, (source, e) -> {}); - } - - @Override - protected synchronized void doStart() { - // set initial state - DiscoveryNode localNode = transportService.getLocalNode(); - clusterState = createInitialState(localNode); - clusterApplier.setInitialState(clusterState); - } - - protected ClusterState createInitialState(DiscoveryNode localNode) { - ClusterState.Builder builder = ClusterState.builder(clusterName); - return builder.nodes(DiscoveryNodes.builder().add(localNode) - .localNodeId(localNode.getId()) - .masterNodeId(localNode.getId()) - .build()) - .blocks(ClusterBlocks.builder() - .addGlobalBlock(STATE_NOT_RECOVERED_BLOCK)) - .build(); - } - - @Override - protected void doStop() { - - } - - @Override - protected void doClose() { - - } - -} diff --git a/server/src/main/java/org/elasticsearch/index/Index.java b/server/src/main/java/org/elasticsearch/index/Index.java index ac5a2763644fa..9b6f4dbd98afb 100644 --- a/server/src/main/java/org/elasticsearch/index/Index.java +++ b/server/src/main/java/org/elasticsearch/index/Index.java @@ -50,8 +50,8 @@ public class Index implements Writeable, ToXContentObject { private final String uuid; public Index(String name, String uuid) { - this.name = Objects.requireNonNull(name).intern(); - this.uuid = Objects.requireNonNull(uuid).intern(); + this.name = Objects.requireNonNull(name); + this.uuid = Objects.requireNonNull(uuid); } /** diff --git a/server/src/main/java/org/elasticsearch/index/IndexModule.java b/server/src/main/java/org/elasticsearch/index/IndexModule.java index 6b83d2252dec7..acec458b8b0cd 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexModule.java +++ b/server/src/main/java/org/elasticsearch/index/IndexModule.java @@ -366,6 +366,7 @@ public static Type defaultStoreType(final boolean allowMmap) { } public IndexService newIndexService( + IndexService.IndexCreationContext indexCreationContext, NodeEnvironment environment, NamedXContentRegistry xContentRegistry, IndexService.ShardStoreDeleter shardStoreDeleter, @@ -395,7 +396,7 @@ public IndexService newIndexService( } else { queryCache = new DisabledQueryCache(indexSettings); } - return new IndexService(indexSettings, environment, xContentRegistry, + return new IndexService(indexSettings, indexCreationContext, environment, xContentRegistry, new SimilarityService(indexSettings, scriptService, similarities), shardStoreDeleter, analysisRegistry, engineFactory, circuitBreakerService, bigArrays, threadPool, scriptService, client, queryCache, store, eventListener, searcherWrapperFactory, mapperRegistry, diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index ea40dd1db016d..501dbf442b00b 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -136,6 +136,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClust public IndexService( IndexSettings indexSettings, + IndexCreationContext indexCreationContext, NodeEnvironment nodeEnv, NamedXContentRegistry xContentRegistry, SimilarityService similarityService, @@ -162,21 +163,36 @@ public IndexService( this.similarityService = similarityService; this.namedWriteableRegistry = namedWriteableRegistry; 
this.circuitBreakerService = circuitBreakerService; - this.mapperService = new MapperService(indexSettings, registry.build(indexSettings), xContentRegistry, similarityService, - mapperRegistry, - // we parse all percolator queries as they would be parsed on shard 0 - () -> newQueryShardContext(0, null, System::currentTimeMillis, null)); - this.indexFieldData = new IndexFieldDataService(indexSettings, indicesFieldDataCache, circuitBreakerService, mapperService); - if (indexSettings.getIndexSortConfig().hasIndexSort()) { - // we delay the actual creation of the sort order for this index because the mapping has not been merged yet. - // The sort order is validated right after the merge of the mapping later in the process. - this.indexSortSupplier = () -> indexSettings.getIndexSortConfig().buildIndexSort( - mapperService::fullName, - indexFieldData::getForField - ); - } else { + if (indexSettings.getIndexMetaData().getState() == IndexMetaData.State.CLOSE && + indexCreationContext == IndexCreationContext.CREATE_INDEX) { // metadata verification needs a mapper service + this.mapperService = null; + this.indexFieldData = null; this.indexSortSupplier = () -> null; + this.bitsetFilterCache = null; + this.warmer = null; + this.indexCache = null; + } else { + this.mapperService = new MapperService(indexSettings, registry.build(indexSettings), xContentRegistry, similarityService, + mapperRegistry, + // we parse all percolator queries as they would be parsed on shard 0 + () -> newQueryShardContext(0, null, System::currentTimeMillis, null)); + this.indexFieldData = new IndexFieldDataService(indexSettings, indicesFieldDataCache, circuitBreakerService, mapperService); + if (indexSettings.getIndexSortConfig().hasIndexSort()) { + // we delay the actual creation of the sort order for this index because the mapping has not been merged yet. + // The sort order is validated right after the merge of the mapping later in the process. 
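Editor's note: the IndexService constructor above now takes an IndexCreationContext and skips building the mapper service, field data, and caches for a closed index created through the normal path, while metadata verification still gets a mapper service. A stripped-down sketch of that conditional initialization, with placeholder types standing in for the heavyweight components:

class IndexServiceSketch {
    enum IndexCreationContext { CREATE_INDEX, META_DATA_VERIFICATION }
    enum IndexState { OPEN, CLOSE }

    // Placeholder for the per-index machinery (mapper service, field data, caches).
    static final class MapperService { }

    private final MapperService mapperService;

    IndexServiceSketch(IndexState state, IndexCreationContext context) {
        if (state == IndexState.CLOSE && context == IndexCreationContext.CREATE_INDEX) {
            // a closed index created through the normal path has nothing to search;
            // only META_DATA_VERIFICATION still needs the mapper service
            this.mapperService = null;
        } else {
            this.mapperService = new MapperService();
        }
    }

    boolean updateMapping() {
        // callers must tolerate the skipped initialization, as the real updateMapping now does
        return mapperService != null;
    }
}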
+ this.indexSortSupplier = () -> indexSettings.getIndexSortConfig().buildIndexSort( + mapperService::fullName, + indexFieldData::getForField + ); + } else { + this.indexSortSupplier = () -> null; + } + indexFieldData.setListener(new FieldDataCacheListener(this)); + this.bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetCacheListener(this)); + this.warmer = new IndexWarmer(threadPool, indexFieldData, bitsetFilterCache.createListener(threadPool)); + this.indexCache = new IndexCache(indexSettings, queryCache, bitsetFilterCache); } + this.shardStoreDeleter = shardStoreDeleter; this.bigArrays = bigArrays; this.threadPool = threadPool; @@ -185,10 +201,6 @@ public IndexService( this.eventListener = eventListener; this.nodeEnv = nodeEnv; this.indexStore = indexStore; - indexFieldData.setListener(new FieldDataCacheListener(this)); - this.bitsetFilterCache = new BitsetFilterCache(indexSettings, new BitsetCacheListener(this)); - this.warmer = new IndexWarmer(threadPool, indexFieldData, bitsetFilterCache.createListener(threadPool)); - this.indexCache = new IndexCache(indexSettings, queryCache, bitsetFilterCache); this.engineFactory = Objects.requireNonNull(engineFactory); // initialize this last -- otherwise if the wrapper requires any other member to be non-null we fail with an NPE this.searcherWrapper = wrapperFactory.newWrapper(this); @@ -202,6 +214,11 @@ public IndexService( updateFsyncTaskIfNecessary(); } + public enum IndexCreationContext { + CREATE_INDEX, + META_DATA_VERIFICATION + } + public int numberOfShards() { return shards.size(); } @@ -548,7 +565,10 @@ List getSearchOperationListener() { // pkg private for @Override public boolean updateMapping(final IndexMetaData currentIndexMetaData, final IndexMetaData newIndexMetaData) throws IOException { - return mapperService().updateMapping(currentIndexMetaData, newIndexMetaData); + if (mapperService == null) { + return false; + } + return mapperService.updateMapping(currentIndexMetaData, newIndexMetaData); } private class StoreCloseListener implements Store.OnClose { diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index edea3952ce4c5..9bed93c371696 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -633,14 +633,14 @@ protected final GetResult getFromSearcher(Get get, BiFunction search return GetResult.NOT_EXISTS; } if (get.versionType().isVersionConflictForReads(versionValue.version, get.version())) { - throw new VersionConflictEngineException(shardId, get.type(), get.id(), + throw new VersionConflictEngineException(shardId, get.id(), get.versionType().explainConflictForReads(versionValue.version, get.version())); } if (get.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && ( get.getIfSeqNo() != versionValue.seqNo || get.getIfPrimaryTerm() != versionValue.term )) { - throw new VersionConflictEngineException(shardId, get.type(), get.id(), + throw new VersionConflictEngineException(shardId, get.id(), get.getIfSeqNo(), get.getIfPrimaryTerm(), versionValue.seqNo, versionValue.term); } if (get.isReadFromTranslog()) { @@ -1004,13 +1004,13 @@ protected final IndexingStrategy planIndexingAsPrimary(Index index) throws IOExc currentNotFoundOrDeleted = versionValue.isDelete(); } if (index.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && versionValue == null) { - final VersionConflictEngineException e = new 
VersionConflictEngineException(shardId, index.type(), index.id(), + final VersionConflictEngineException e = new VersionConflictEngineException(shardId, index.id(), index.getIfSeqNo(), index.getIfPrimaryTerm(), SequenceNumbers.UNASSIGNED_SEQ_NO, 0); plan = IndexingStrategy.skipDueToVersionConflict(e, currentNotFoundOrDeleted, currentVersion, getPrimaryTerm()); } else if (index.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && ( versionValue.seqNo != index.getIfSeqNo() || versionValue.term != index.getIfPrimaryTerm() )) { - final VersionConflictEngineException e = new VersionConflictEngineException(shardId, index.type(), index.id(), + final VersionConflictEngineException e = new VersionConflictEngineException(shardId, index.id(), index.getIfSeqNo(), index.getIfPrimaryTerm(), versionValue.seqNo, versionValue.term); plan = IndexingStrategy.skipDueToVersionConflict(e, currentNotFoundOrDeleted, currentVersion, getPrimaryTerm()); } else if (index.versionType().isVersionConflictForWrites( @@ -1335,13 +1335,13 @@ protected final DeletionStrategy planDeletionAsPrimary(Delete delete) throws IOE } final DeletionStrategy plan; if (delete.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && versionValue == null) { - final VersionConflictEngineException e = new VersionConflictEngineException(shardId, delete.type(), delete.id(), + final VersionConflictEngineException e = new VersionConflictEngineException(shardId, delete.id(), delete.getIfSeqNo(), delete.getIfPrimaryTerm(), SequenceNumbers.UNASSIGNED_SEQ_NO, 0); plan = DeletionStrategy.skipDueToVersionConflict(e, currentVersion, getPrimaryTerm(), currentlyDeleted); } else if (delete.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && ( versionValue.seqNo != delete.getIfSeqNo() || versionValue.term != delete.getIfPrimaryTerm() )) { - final VersionConflictEngineException e = new VersionConflictEngineException(shardId, delete.type(), delete.id(), + final VersionConflictEngineException e = new VersionConflictEngineException(shardId, delete.id(), delete.getIfSeqNo(), delete.getIfPrimaryTerm(), versionValue.seqNo, versionValue.term); plan = DeletionStrategy.skipDueToVersionConflict(e, currentVersion, getPrimaryTerm(), currentlyDeleted); } else if (delete.versionType().isVersionConflictForWrites(currentVersion, delete.version(), currentlyDeleted)) { diff --git a/server/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java b/server/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java index 357c9c107836e..0f6c217409c30 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java +++ b/server/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java @@ -28,25 +28,25 @@ public class VersionConflictEngineException extends EngineException { public VersionConflictEngineException(ShardId shardId, Engine.Operation op, long currentVersion, boolean deleted) { - this(shardId, op.type(), op.id(), op.versionType().explainConflictForWrites(currentVersion, op.version(), deleted)); + this(shardId, op.id(), op.versionType().explainConflictForWrites(currentVersion, op.version(), deleted)); } - public VersionConflictEngineException(ShardId shardId, String type, String id, + public VersionConflictEngineException(ShardId shardId, String id, long compareAndWriteSeqNo, long compareAndWriteTerm, long currentSeqNo, long currentTerm) { - this(shardId, type, id, "required seqNo [" + compareAndWriteSeqNo + "], primary term [" + compareAndWriteTerm +"]." 
+ + this(shardId, id, "required seqNo [" + compareAndWriteSeqNo + "], primary term [" + compareAndWriteTerm +"]." + (currentSeqNo == SequenceNumbers.UNASSIGNED_SEQ_NO ? " but no document was found" : " current document has seqNo [" + currentSeqNo + "] and primary term ["+ currentTerm + "]" )); } - public VersionConflictEngineException(ShardId shardId, String type, String id, String explanation) { - this(shardId, null, type, id, explanation); + public VersionConflictEngineException(ShardId shardId, String id, String explanation) { + this(shardId, null, id, explanation); } - public VersionConflictEngineException(ShardId shardId, Throwable cause, String type, String id, String explanation) { - this(shardId, "[{}][{}]: version conflict, {}", cause, type, id, explanation); + public VersionConflictEngineException(ShardId shardId, Throwable cause, String id, String explanation) { + this(shardId, "[{}]: version conflict, {}", cause, id, explanation); } public VersionConflictEngineException(ShardId shardId, String msg, Throwable cause, Object... params) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index 805b50e628bb1..5790248ead807 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -74,6 +74,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.function.IntPredicate; import static org.elasticsearch.index.mapper.TypeParsers.parseTextField; @@ -687,69 +688,12 @@ public Query phrasePrefixQuery(TokenStream stream, int slop, int maxExpansions) } private Query analyzePhrasePrefix(TokenStream stream, int slop, int maxExpansions) throws IOException { - final MultiPhrasePrefixQuery query = createPhrasePrefixQuery(stream, name(), slop, maxExpansions); - - if (slop > 0 - || prefixFieldType == null - || prefixFieldType.indexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) < 0) { - return query; - } - - int lastPos = query.getTerms().length - 1; - final Term[][] terms = query.getTerms(); - final int[] positions = query.getPositions(); - for (Term term : terms[lastPos]) { - String value = term.text(); - if (value.length() < prefixFieldType.minChars || value.length() > prefixFieldType.maxChars) { - return query; - } - } - - if (terms.length == 1) { - Term[] newTerms = Arrays.stream(terms[0]) - .map(term -> new Term(prefixFieldType.name(), term.bytes())) - .toArray(Term[]::new); - return new SynonymQuery(newTerms); - } - - SpanNearQuery.Builder spanQuery = new SpanNearQuery.Builder(name(), true); - spanQuery.setSlop(slop); - int previousPos = -1; - for (int i = 0; i < terms.length; i++) { - Term[] posTerms = terms[i]; - int posInc = positions[i] - previousPos; - previousPos = positions[i]; - if (posInc > 1) { - spanQuery.addGap(posInc - 1); - } - if (i == lastPos) { - if (posTerms.length == 1) { - FieldMaskingSpanQuery fieldMask = - new FieldMaskingSpanQuery(new SpanTermQuery(new Term(prefixFieldType.name(), posTerms[0].bytes())), name()); - spanQuery.addClause(fieldMask); - } else { - SpanQuery[] queries = Arrays.stream(posTerms) - .map(term -> new FieldMaskingSpanQuery( - new SpanTermQuery(new Term(prefixFieldType.name(), term.bytes())), name()) - ) - .toArray(SpanQuery[]::new); - spanQuery.addClause(new SpanOrQuery(queries)); - } - } else { - if (posTerms.length == 1) { - spanQuery.addClause(new SpanTermQuery(posTerms[0])); - } 
else { - SpanTermQuery[] queries = Arrays.stream(posTerms) - .map(SpanTermQuery::new) - .toArray(SpanTermQuery[]::new); - spanQuery.addClause(new SpanOrQuery(queries)); - } - } - } - return spanQuery.build(); + String prefixField = prefixFieldType == null || slop > 0 ? null : prefixFieldType.name(); + IntPredicate usePrefix = (len) -> len >= prefixFieldType.minChars && len <= prefixFieldType.maxChars; + return createPhrasePrefixQuery(stream, name(), slop, maxExpansions, prefixField, usePrefix); } - private static boolean hasGaps(TokenStream stream) throws IOException { + public static boolean hasGaps(TokenStream stream) throws IOException { assert stream instanceof CachingTokenFilter; PositionIncrementAttribute posIncAtt = stream.getAttribute(PositionIncrementAttribute.class); stream.reset(); @@ -963,8 +907,8 @@ public static Query createPhraseQuery(TokenStream stream, String field, int slop return mpqb.build(); } - public static MultiPhrasePrefixQuery createPhrasePrefixQuery(TokenStream stream, String field, - int slop, int maxExpansions) throws IOException { + public static Query createPhrasePrefixQuery(TokenStream stream, String field, int slop, int maxExpansions, + String prefixField, IntPredicate usePrefixField) throws IOException { MultiPhrasePrefixQuery builder = new MultiPhrasePrefixQuery(field); builder.setSlop(slop); builder.setMaxExpansions(maxExpansions); @@ -987,6 +931,61 @@ public static MultiPhrasePrefixQuery createPhrasePrefixQuery(TokenStream stream, currentTerms.add(new Term(field, termAtt.getBytesRef())); } builder.add(currentTerms.toArray(new Term[0]), position); - return builder; + if (prefixField == null) { + return builder; + } + + int lastPos = builder.getTerms().length - 1; + final Term[][] terms = builder.getTerms(); + final int[] positions = builder.getPositions(); + for (Term term : terms[lastPos]) { + String value = term.text(); + if (usePrefixField.test(value.length()) == false) { + return builder; + } + } + + if (terms.length == 1) { + Term[] newTerms = Arrays.stream(terms[0]) + .map(term -> new Term(prefixField, term.bytes())) + .toArray(Term[]::new); + return new SynonymQuery(newTerms); + } + + SpanNearQuery.Builder spanQuery = new SpanNearQuery.Builder(field, true); + spanQuery.setSlop(slop); + int previousPos = -1; + for (int i = 0; i < terms.length; i++) { + Term[] posTerms = terms[i]; + int posInc = positions[i] - previousPos; + previousPos = positions[i]; + if (posInc > 1) { + spanQuery.addGap(posInc - 1); + } + if (i == lastPos) { + if (posTerms.length == 1) { + FieldMaskingSpanQuery fieldMask = + new FieldMaskingSpanQuery(new SpanTermQuery(new Term(prefixField, posTerms[0].bytes())), field); + spanQuery.addClause(fieldMask); + } else { + SpanQuery[] queries = Arrays.stream(posTerms) + .map(term -> new FieldMaskingSpanQuery( + new SpanTermQuery(new Term(prefixField, term.bytes())), field) + ) + .toArray(SpanQuery[]::new); + spanQuery.addClause(new SpanOrQuery(queries)); + } + } else { + if (posTerms.length == 1) { + spanQuery.addClause(new SpanTermQuery(posTerms[0])); + } else { + SpanTermQuery[] queries = Arrays.stream(posTerms) + .map(SpanTermQuery::new) + .toArray(SpanTermQuery[]::new); + spanQuery.addClause(new SpanOrQuery(queries)); + } + } + } + return spanQuery.build(); } } diff --git a/server/src/main/java/org/elasticsearch/index/query/MatchBoolPrefixQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MatchBoolPrefixQueryBuilder.java new file mode 100644 index 0000000000000..7f0c89f9df499 --- /dev/null +++ 
b/server/src/main/java/org/elasticsearch/index/query/MatchBoolPrefixQueryBuilder.java @@ -0,0 +1,393 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query; + +import org.apache.lucene.search.FuzzyQuery; +import org.apache.lucene.search.Query; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.lucene.search.Queries; +import org.elasticsearch.common.unit.Fuzziness; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.query.support.QueryParsers; +import org.elasticsearch.index.search.MatchQuery; + +import java.io.IOException; +import java.util.Objects; + +import static org.elasticsearch.index.query.MatchQueryBuilder.FUZZY_REWRITE_FIELD; +import static org.elasticsearch.index.query.MatchQueryBuilder.FUZZY_TRANSPOSITIONS_FIELD; +import static org.elasticsearch.index.query.MatchQueryBuilder.MAX_EXPANSIONS_FIELD; +import static org.elasticsearch.index.query.MatchQueryBuilder.OPERATOR_FIELD; +import static org.elasticsearch.index.query.MatchQueryBuilder.PREFIX_LENGTH_FIELD; + +/** + * The boolean prefix query analyzes the input text and creates a boolean query containing a Term query for each term, except + * for the last term, which is used to create a prefix query + */ +public class MatchBoolPrefixQueryBuilder extends AbstractQueryBuilder { + + public static final String NAME = "match_bool_prefix"; + + private static final Operator DEFAULT_OPERATOR = Operator.OR; + + private final String fieldName; + + private final Object value; + + private String analyzer; + + private Operator operator = DEFAULT_OPERATOR; + + private String minimumShouldMatch; + + private Fuzziness fuzziness; + + private int prefixLength = FuzzyQuery.defaultPrefixLength; + + private int maxExpansions = FuzzyQuery.defaultMaxExpansions; + + private boolean fuzzyTranspositions = FuzzyQuery.defaultTranspositions; + + private String fuzzyRewrite; + + public MatchBoolPrefixQueryBuilder(String fieldName, Object value) { + if (Strings.isEmpty(fieldName)) { + throw new IllegalArgumentException("[" + NAME + "] requires fieldName"); + } + if (value == null) { + throw new IllegalArgumentException("[" + NAME + "] requires query value"); + } + this.fieldName = fieldName; + this.value = value; + } + + public MatchBoolPrefixQueryBuilder(StreamInput in) throws IOException { + super(in); + fieldName = in.readString(); + value = in.readGenericValue(); + analyzer = in.readOptionalString(); + operator = 
Operator.readFromStream(in); + minimumShouldMatch = in.readOptionalString(); + fuzziness = in.readOptionalWriteable(Fuzziness::new); + prefixLength = in.readVInt(); + maxExpansions = in.readVInt(); + fuzzyTranspositions = in.readBoolean(); + fuzzyRewrite = in.readOptionalString(); + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + out.writeString(fieldName); + out.writeGenericValue(value); + out.writeOptionalString(analyzer); + operator.writeTo(out); + out.writeOptionalString(minimumShouldMatch); + out.writeOptionalWriteable(fuzziness); + out.writeVInt(prefixLength); + out.writeVInt(maxExpansions); + out.writeBoolean(fuzzyTranspositions); + out.writeOptionalString(fuzzyRewrite); + } + + /** Returns the field name used in this query. */ + public String fieldName() { + return this.fieldName; + } + + /** Returns the value used in this query. */ + public Object value() { + return this.value; + } + + /** Get the analyzer to use, if previously set, otherwise {@code null} */ + public String analyzer() { + return this.analyzer; + } + + /** + * Explicitly set the analyzer to use. Defaults to use explicit mapping + * config for the field, or, if not set, the default search analyzer. + */ + public MatchBoolPrefixQueryBuilder analyzer(String analyzer) { + this.analyzer = analyzer; + return this; + } + + /** Sets the operator to use when using a boolean query. Defaults to {@code OR}. */ + public MatchBoolPrefixQueryBuilder operator(Operator operator) { + if (operator == null) { + throw new IllegalArgumentException("[" + NAME + "] requires operator to be non-null"); + } + this.operator = operator; + return this; + } + + /** Returns the operator to use in a boolean query.*/ + public Operator operator() { + return this.operator; + } + + /** Sets optional minimumShouldMatch value to apply to the query */ + public MatchBoolPrefixQueryBuilder minimumShouldMatch(String minimumShouldMatch) { + this.minimumShouldMatch = minimumShouldMatch; + return this; + } + + /** Gets the minimumShouldMatch value */ + public String minimumShouldMatch() { + return this.minimumShouldMatch; + } + + /** Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO". */ + public MatchBoolPrefixQueryBuilder fuzziness(Object fuzziness) { + this.fuzziness = Fuzziness.build(fuzziness); + return this; + } + + /** Gets the fuzziness used when evaluated to a fuzzy query type. */ + public Fuzziness fuzziness() { + return this.fuzziness; + } + + /** + * Sets the length of a length of common (non-fuzzy) prefix for fuzzy match queries + * @param prefixLength non-negative length of prefix + * @throws IllegalArgumentException in case the prefix is negative + */ + public MatchBoolPrefixQueryBuilder prefixLength(int prefixLength) { + if (prefixLength < 0 ) { + throw new IllegalArgumentException("[" + NAME + "] requires prefix length to be non-negative."); + } + this.prefixLength = prefixLength; + return this; + } + + /** + * Gets the length of a length of common (non-fuzzy) prefix for fuzzy match queries + */ + public int prefixLength() { + return this.prefixLength; + } + + /** + * When using fuzzy or prefix type query, the number of term expansions to use. 
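A hypothetical end-to-end use of the builder defined in this file (the field name, query text, and parameter values are invented for illustration; every setter shown exists on the class):

    // each fully analyzed term is matched as a term-level clause, the trailing "f" as a prefix
    MatchBoolPrefixQueryBuilder query = new MatchBoolPrefixQueryBuilder("title", "quick brown f")
            .analyzer("standard")
            .operator(Operator.OR)
            .minimumShouldMatch("2")
            .fuzziness("AUTO")      // applied to the fully typed terms, not to the trailing prefix
            .prefixLength(1)
            .maxExpansions(10);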
+ */ + public MatchBoolPrefixQueryBuilder maxExpansions(int maxExpansions) { + if (maxExpansions <= 0 ) { + throw new IllegalArgumentException("[" + NAME + "] requires maxExpansions to be positive."); + } + this.maxExpansions = maxExpansions; + return this; + } + + /** + * Get the (optional) number of term expansions when using fuzzy or prefix type query. + */ + public int maxExpansions() { + return this.maxExpansions; + } + + /** + * Sets whether transpositions are supported in fuzzy queries.
+ * The default metric used by fuzzy queries to determine a match is the Damerau-Levenshtein + * distance formula, which supports transpositions. Setting transpositions to false will + * switch to classic Levenshtein distance.
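A plain-Lucene illustration of that distinction, assuming a made-up "title" field: with transpositions enabled, the swapped letters in "qiuck" are a single edit away from "quick", so an edit distance of 1 still matches; classic Levenshtein counts the swap as two edits.

    Term term = new Term("title", "qiuck");
    Query damerauLevenshtein = new FuzzyQuery(term, 1, 0, 50, true);   // transpositions on: matches "quick"
    Query classicLevenshtein = new FuzzyQuery(term, 1, 0, 50, false);  // transpositions off: "quick" is two edits away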
+ * If not set, Damerau-Levenshtein distance metric will be used. + */ + public MatchBoolPrefixQueryBuilder fuzzyTranspositions(boolean fuzzyTranspositions) { + this.fuzzyTranspositions = fuzzyTranspositions; + return this; + } + + /** Gets the fuzzy query transposition setting. */ + public boolean fuzzyTranspositions() { + return this.fuzzyTranspositions; + } + + /** Sets the fuzzy_rewrite parameter controlling how the fuzzy query will get rewritten */ + public MatchBoolPrefixQueryBuilder fuzzyRewrite(String fuzzyRewrite) { + this.fuzzyRewrite = fuzzyRewrite; + return this; + } + + /** + * Get the fuzzy_rewrite parameter + * @see #fuzzyRewrite(String) + */ + public String fuzzyRewrite() { + return this.fuzzyRewrite; + } + + @Override + protected void doXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(NAME); + builder.startObject(fieldName); + builder.field(MatchQueryBuilder.QUERY_FIELD.getPreferredName(), value); + if (analyzer != null) { + builder.field(MatchQueryBuilder.ANALYZER_FIELD.getPreferredName(), analyzer); + } + builder.field(OPERATOR_FIELD.getPreferredName(), operator.toString()); + if (minimumShouldMatch != null) { + builder.field(MatchQueryBuilder.MINIMUM_SHOULD_MATCH_FIELD.getPreferredName(), minimumShouldMatch); + } + if (fuzziness != null) { + fuzziness.toXContent(builder, params); + } + builder.field(PREFIX_LENGTH_FIELD.getPreferredName(), prefixLength); + builder.field(MAX_EXPANSIONS_FIELD.getPreferredName(), maxExpansions); + builder.field(FUZZY_TRANSPOSITIONS_FIELD.getPreferredName(), fuzzyTranspositions); + if (fuzzyRewrite != null) { + builder.field(FUZZY_REWRITE_FIELD.getPreferredName(), fuzzyRewrite); + } + printBoostAndQueryName(builder); + builder.endObject(); + builder.endObject(); + } + + public static MatchBoolPrefixQueryBuilder fromXContent(XContentParser parser) throws IOException { + String fieldName = null; + Object value = null; + float boost = AbstractQueryBuilder.DEFAULT_BOOST; + String analyzer = null; + Operator operator = DEFAULT_OPERATOR; + String minimumShouldMatch = null; + Fuzziness fuzziness = null; + int prefixLength = FuzzyQuery.defaultPrefixLength; + int maxExpansion = FuzzyQuery.defaultMaxExpansions; + boolean fuzzyTranspositions = FuzzyQuery.defaultTranspositions; + String fuzzyRewrite = null; + String queryName = null; + XContentParser.Token token; + String currentFieldName = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, currentFieldName); + fieldName = currentFieldName; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token.isValue()) { + if (MatchQueryBuilder.QUERY_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + value = parser.objectText(); + } else if (MatchQueryBuilder.ANALYZER_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + analyzer = parser.text(); + } else if (OPERATOR_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + operator = Operator.fromString(parser.text()); + } else if (MatchQueryBuilder.MINIMUM_SHOULD_MATCH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + minimumShouldMatch = parser.textOrNull(); + } else if 
(Fuzziness.FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + fuzziness = Fuzziness.parse(parser); + } else if (PREFIX_LENGTH_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + prefixLength = parser.intValue(); + } else if (MAX_EXPANSIONS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + maxExpansion = parser.intValue(); + } else if (FUZZY_TRANSPOSITIONS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + fuzzyTranspositions = parser.booleanValue(); + } else if (FUZZY_REWRITE_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + fuzzyRewrite = parser.textOrNull(); + } else if (AbstractQueryBuilder.BOOST_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + boost = parser.floatValue(); + } else if (AbstractQueryBuilder.NAME_FIELD.match(currentFieldName, parser.getDeprecationHandler())) { + queryName = parser.text(); + } else { + throw new ParsingException(parser.getTokenLocation(), + "[" + NAME + "] query does not support [" + currentFieldName + "]"); + } + } else { + throw new ParsingException(parser.getTokenLocation(), + "[" + NAME + "] unknown token [" + token + "] after [" + currentFieldName + "]"); + } + } + } else { + throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, parser.currentName()); + fieldName = parser.currentName(); + value = parser.objectText(); + } + } + + MatchBoolPrefixQueryBuilder queryBuilder = new MatchBoolPrefixQueryBuilder(fieldName, value); + queryBuilder.analyzer(analyzer); + queryBuilder.operator(operator); + queryBuilder.minimumShouldMatch(minimumShouldMatch); + queryBuilder.boost(boost); + queryBuilder.queryName(queryName); + if (fuzziness != null) { + queryBuilder.fuzziness(fuzziness); + } + queryBuilder.prefixLength(prefixLength); + queryBuilder.maxExpansions(maxExpansion); + queryBuilder.fuzzyTranspositions(fuzzyTranspositions); + queryBuilder.fuzzyRewrite(fuzzyRewrite); + return queryBuilder; + } + + @Override + protected Query doToQuery(QueryShardContext context) throws IOException { + if (analyzer != null && context.getIndexAnalyzers().get(analyzer) == null) { + throw new QueryShardException(context, "[" + NAME + "] analyzer [" + analyzer + "] not found"); + } + + final MatchQuery matchQuery = new MatchQuery(context); + if (analyzer != null) { + matchQuery.setAnalyzer(analyzer); + } + matchQuery.setOccur(operator.toBooleanClauseOccur()); + matchQuery.setFuzziness(fuzziness); + matchQuery.setFuzzyPrefixLength(prefixLength); + matchQuery.setMaxExpansions(maxExpansions); + matchQuery.setTranspositions(fuzzyTranspositions); + matchQuery.setFuzzyRewriteMethod(QueryParsers.parseRewriteMethod(fuzzyRewrite, null, LoggingDeprecationHandler.INSTANCE)); + + final Query query = matchQuery.parse(MatchQuery.Type.BOOLEAN_PREFIX, fieldName, value); + return Queries.maybeApplyMinimumShouldMatch(query, minimumShouldMatch); + } + + @Override + protected boolean doEquals(MatchBoolPrefixQueryBuilder other) { + return Objects.equals(fieldName, other.fieldName) && + Objects.equals(value, other.value) && + Objects.equals(analyzer, other.analyzer) && + Objects.equals(operator, other.operator) && + Objects.equals(minimumShouldMatch, other.minimumShouldMatch) && + Objects.equals(fuzziness, other.fuzziness) && + Objects.equals(prefixLength, other.prefixLength) && + Objects.equals(maxExpansions, other.maxExpansions) && + Objects.equals(fuzzyTranspositions, other.fuzzyTranspositions) && + Objects.equals(fuzzyRewrite, other.fuzzyRewrite); + } + + @Override + 
protected int doHashCode() { + return Objects.hash(fieldName, value, analyzer, operator, minimumShouldMatch, fuzziness, prefixLength, maxExpansions, + fuzzyTranspositions, fuzzyRewrite); + } + + @Override + public String getWriteableName() { + return NAME; + } +} diff --git a/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java index 0e9148e540102..267c86ea84486 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java @@ -128,7 +128,12 @@ public enum Type implements Writeable { * Uses the best matching phrase-prefix field as main score and uses * a tie-breaker to adjust the score based on remaining field matches */ - PHRASE_PREFIX(MatchQuery.Type.PHRASE_PREFIX, 0.0f, new ParseField("phrase_prefix")); + PHRASE_PREFIX(MatchQuery.Type.PHRASE_PREFIX, 0.0f, new ParseField("phrase_prefix")), + + /** + * Uses the sum of the matching boolean fields to score the query + */ + BOOL_PREFIX(MatchQuery.Type.BOOLEAN_PREFIX, 1.0f, new ParseField("bool_prefix")); private MatchQuery.Type matchQueryType; private final float tieBreaker; @@ -687,6 +692,16 @@ public static MultiMatchQueryBuilder fromXContent(XContentParser parser) throws "Fuzziness not allowed for type [" + type.parseField.getPreferredName() + "]"); } + if (slop != DEFAULT_PHRASE_SLOP && type == Type.BOOL_PREFIX) { + throw new ParsingException(parser.getTokenLocation(), + "[" + SLOP_FIELD.getPreferredName() + "] not allowed for type [" + type.parseField.getPreferredName() + "]"); + } + + if (cutoffFrequency != null && type == Type.BOOL_PREFIX) { + throw new ParsingException(parser.getTokenLocation(), + "[" + CUTOFF_FREQUENCY_FIELD.getPreferredName() + "] not allowed for type [" + type.parseField.getPreferredName() + "]"); + } + MultiMatchQueryBuilder builder = new MultiMatchQueryBuilder(value) .fields(fieldsBoosts) .type(type) diff --git a/server/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java b/server/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java index a860bd19d7c5f..accfd2f656999 100644 --- a/server/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/functionscore/ScriptScoreFunctionBuilder.java @@ -94,7 +94,7 @@ protected ScoreFunction doToFunction(QueryShardContext context) { try { ScoreScript.Factory factory = context.getScriptService().compile(script, ScoreScript.CONTEXT); ScoreScript.LeafFactory searchScript = factory.newFactory(script.getParams(), context.lookup()); - return new ScriptScoreFunction(script, searchScript); + return new ScriptScoreFunction(script, searchScript, context.index().getName(), context.getShardId()); } catch (Exception e) { throw new QueryShardException(context, "script_score: the script could not be loaded", e); } diff --git a/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java b/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java index ad4b267eef643..da7273aa66303 100644 --- a/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java +++ b/server/src/main/java/org/elasticsearch/index/search/MatchQuery.java @@ -23,6 +23,7 @@ import org.apache.lucene.analysis.CachingTokenFilter; import org.apache.lucene.analysis.TokenStream; import 
org.apache.lucene.analysis.miscellaneous.DisableGraphAttribute; +import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.tokenattributes.PositionLengthAttribute; import org.apache.lucene.analysis.tokenattributes.TermToBytesRefAttribute; @@ -51,7 +52,9 @@ import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.lucene.search.SpanBooleanQueryRewriteWithMaxClause; import org.elasticsearch.common.unit.Fuzziness; +import org.elasticsearch.index.mapper.KeywordFieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.TextFieldMapper; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.support.QueryParsers; @@ -78,7 +81,11 @@ public enum Type implements Writeable { /** * The text is analyzed and used in a phrase query, with the last term acting as a prefix. */ - PHRASE_PREFIX(2); + PHRASE_PREFIX(2), + /** + * The text is analyzed, terms are added to a boolean query with the last term acting as a prefix. + */ + BOOLEAN_PREFIX(3); private final int ordinal; @@ -244,11 +251,18 @@ public Query parse(Type type, String fieldName, Object value) throws IOException /* * If a keyword analyzer is used, we know that further analysis isn't - * needed and can immediately return a term query. + * needed and can immediately return a term query. If the query is a bool + * prefix query and the field type supports prefix queries, we return + * a prefix query instead */ - if (analyzer == Lucene.KEYWORD_ANALYZER - && type != Type.PHRASE_PREFIX) { - return builder.newTermQuery(new Term(fieldName, value.toString())); + if (analyzer == Lucene.KEYWORD_ANALYZER && type != Type.PHRASE_PREFIX) { + final Term term = new Term(fieldName, value.toString()); + if ((fieldType instanceof TextFieldMapper.TextFieldType || fieldType instanceof KeywordFieldMapper.KeywordFieldType) + && type == Type.BOOLEAN_PREFIX) { + return builder.newPrefixQuery(fieldName, term); + } else { + return builder.newTermQuery(term); + } } return parseInternal(type, fieldName, builder, value); @@ -265,6 +279,10 @@ protected final Query parseInternal(Type type, String fieldName, MatchQueryBuild } break; + case BOOLEAN_PREFIX: + query = builder.createBooleanPrefixQuery(fieldName, value.toString(), occur); + break; + case PHRASE: query = builder.createPhraseQuery(fieldName, value.toString(), phraseSlop); break; @@ -354,10 +372,28 @@ protected Query createFieldQuery(Analyzer analyzer, BooleanClause.Occur operator return createQuery(field, queryText, type, operator, slop); } - public Query createPhrasePrefixQuery(String field, String queryText, int slop) { + /** + * Creates a phrase prefix query from the query text. + * + * @param field field name + * @param queryText text to be passed to the analyzer + * @return {@code PrefixQuery}, {@code MultiPhrasePrefixQuery}, based on the analysis of {@code queryText} + */ + protected Query createPhrasePrefixQuery(String field, String queryText, int slop) { return createQuery(field, queryText, Type.PHRASE_PREFIX, occur, slop); } + /** + * Creates a boolean prefix query from the query text. 
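Roughly, when the input analyzes to the terms [quick, brown, f], the BOOLEAN_PREFIX path boils down to the following plain-Lucene shape (field name and terms are illustrative; the real code builds the clauses through the query-builder callbacks below):

    BooleanQuery.Builder bq = new BooleanQuery.Builder();
    bq.add(new TermQuery(new Term("title", "quick")), BooleanClause.Occur.SHOULD);
    bq.add(new TermQuery(new Term("title", "brown")), BooleanClause.Occur.SHOULD);
    bq.add(new PrefixQuery(new Term("title", "f")), BooleanClause.Occur.SHOULD);  // last position as a prefix
    Query boolPrefix = bq.build();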
+ * + * @param field field name + * @param queryText text to be passed to the analyzer + * @return {@code PrefixQuery}, {@code BooleanQuery}, based on the analysis of {@code queryText} + */ + protected Query createBooleanPrefixQuery(String field, String queryText, BooleanClause.Occur occur) { + return createQuery(field, queryText, Type.BOOLEAN_PREFIX, occur, 0); + } + private Query createFieldQuery(TokenStream source, Type type, BooleanClause.Occur operator, String field, int phraseSlop) { assert operator == BooleanClause.Occur.SHOULD || operator == BooleanClause.Occur.MUST; @@ -405,14 +441,14 @@ private Query createFieldQuery(TokenStream source, Type type, BooleanClause.Occu if (type == Type.PHRASE_PREFIX) { return analyzePhrasePrefix(field, stream, phraseSlop, positionCount); } else { - return analyzeTerm(field, stream); + return analyzeTerm(field, stream, type == Type.BOOLEAN_PREFIX); } } else if (isGraph) { // graph if (type == Type.PHRASE || type == Type.PHRASE_PREFIX) { return analyzeGraphPhrase(stream, field, type, phraseSlop); } else { - return analyzeGraphBoolean(field, stream, operator); + return analyzeGraphBoolean(field, stream, operator, type == Type.BOOLEAN_PREFIX); } } else if (type == Type.PHRASE && positionCount > 1) { // phrase @@ -433,7 +469,7 @@ private Query createFieldQuery(TokenStream source, Type type, BooleanClause.Occu return analyzeBoolean(field, stream); } else { // complex case: multiple positions - return analyzeMultiBoolean(field, stream, operator); + return analyzeMultiBoolean(field, stream, operator, type == Type.BOOLEAN_PREFIX); } } } catch (IOException e) { @@ -462,13 +498,13 @@ private Query createQuery(String field, String queryText, Type type, BooleanClau } } - private SpanQuery newSpanQuery(Term[] terms, boolean prefix) { + private SpanQuery newSpanQuery(Term[] terms, boolean isPrefix) { if (terms.length == 1) { - return prefix ? fieldType.spanPrefixQuery(terms[0].text(), spanRewriteMethod, context) : new SpanTermQuery(terms[0]); + return isPrefix ? fieldType.spanPrefixQuery(terms[0].text(), spanRewriteMethod, context) : new SpanTermQuery(terms[0]); } SpanQuery[] spanQueries = new SpanQuery[terms.length]; for (int i = 0; i < terms.length; i++) { - spanQueries[i] = prefix ? new SpanTermQuery(terms[i]) : + spanQueries[i] = isPrefix ? new SpanTermQuery(terms[i]) : fieldType.spanPrefixQuery(terms[i].text(), spanRewriteMethod, context); } return new SpanOrQuery(spanQueries); @@ -479,7 +515,7 @@ protected SpanQuery createSpanQuery(TokenStream in, String field) throws IOExcep return createSpanQuery(in, field, false); } - private SpanQuery createSpanQuery(TokenStream in, String field, boolean prefix) throws IOException { + private SpanQuery createSpanQuery(TokenStream in, String field, boolean isPrefix) throws IOException { TermToBytesRefAttribute termAtt = in.getAttribute(TermToBytesRefAttribute.class); PositionIncrementAttribute posIncAtt = in.getAttribute(PositionIncrementAttribute.class); if (termAtt == null) { @@ -498,7 +534,7 @@ private SpanQuery createSpanQuery(TokenStream in, String field, boolean prefix) lastTerm = new Term(field, termAtt.getBytesRef()); } if (lastTerm != null) { - SpanQuery spanQuery = prefix ? + SpanQuery spanQuery = isPrefix ? fieldType.spanPrefixQuery(lastTerm.text(), spanRewriteMethod, context) : new SpanTermQuery(lastTerm); builder.addClause(spanQuery); } @@ -537,6 +573,74 @@ protected Query newTermQuery(Term term) { } } + /** + * Builds a new prefix query instance. 
+ */ + protected Query newPrefixQuery(String field, Term term) { + try { + return fieldType.prefixQuery(term.text(), null, context); + } catch (RuntimeException e) { + if (lenient) { + return newLenientFieldQuery(field, e); + } + throw e; + } + } + + private Query analyzeTerm(String field, TokenStream stream, boolean isPrefix) throws IOException { + TermToBytesRefAttribute termAtt = stream.getAttribute(TermToBytesRefAttribute.class); + OffsetAttribute offsetAtt = stream.addAttribute(OffsetAttribute.class); + + stream.reset(); + if (!stream.incrementToken()) { + throw new AssertionError(); + } + final Term term = new Term(field, termAtt.getBytesRef()); + int lastOffset = offsetAtt.endOffset(); + stream.end(); + return isPrefix && lastOffset == offsetAtt.endOffset() ? newPrefixQuery(field, term) : newTermQuery(term); + } + + private void add(BooleanQuery.Builder q, String field, List current, BooleanClause.Occur operator, boolean isPrefix) { + if (current.isEmpty()) { + return; + } + if (current.size() == 1) { + if (isPrefix) { + q.add(newPrefixQuery(field, current.get(0)), operator); + } else { + q.add(newTermQuery(current.get(0)), operator); + } + } else { + // We don't apply prefix on synonyms + q.add(newSynonymQuery(current.toArray(new Term[current.size()])), operator); + } + } + + private Query analyzeMultiBoolean(String field, TokenStream stream, + BooleanClause.Occur operator, boolean isPrefix) throws IOException { + BooleanQuery.Builder q = newBooleanQuery(); + List currentQuery = new ArrayList<>(); + + TermToBytesRefAttribute termAtt = stream.getAttribute(TermToBytesRefAttribute.class); + PositionIncrementAttribute posIncrAtt = stream.getAttribute(PositionIncrementAttribute.class); + OffsetAttribute offsetAtt = stream.addAttribute(OffsetAttribute.class); + + stream.reset(); + int lastOffset = 0; + while (stream.incrementToken()) { + if (posIncrAtt.getPositionIncrement() != 0) { + add(q, field, currentQuery, operator, false); + currentQuery.clear(); + } + currentQuery.add(new Term(field, termAtt.getBytesRef())); + lastOffset = offsetAtt.endOffset(); + } + stream.end(); + add(q, field, currentQuery, operator, isPrefix && lastOffset == offsetAtt.endOffset()); + return q.build(); + } + @Override protected Query analyzePhrase(String field, TokenStream stream, int slop) throws IOException { try { @@ -577,6 +681,62 @@ private Query analyzePhrasePrefix(String field, TokenStream stream, int slop, in } } + private Query analyzeGraphBoolean(String field, TokenStream source, + BooleanClause.Occur operator, boolean isPrefix) throws IOException { + source.reset(); + GraphTokenStreamFiniteStrings graph = new GraphTokenStreamFiniteStrings(source); + BooleanQuery.Builder builder = new BooleanQuery.Builder(); + int[] articulationPoints = graph.articulationPoints(); + int lastState = 0; + for (int i = 0; i <= articulationPoints.length; i++) { + int start = lastState; + int end = -1; + if (i < articulationPoints.length) { + end = articulationPoints[i]; + } + lastState = end; + final Query queryPos; + boolean usePrefix = isPrefix && end == -1; + if (graph.hasSidePath(start)) { + final Iterator it = graph.getFiniteStrings(start, end); + Iterator queries = new Iterator() { + @Override + public boolean hasNext() { + return it.hasNext(); + } + + @Override + public Query next() { + TokenStream ts = it.next(); + final Type type; + if (getAutoGenerateMultiTermSynonymsPhraseQuery()) { + type = usePrefix + ? 
Type.PHRASE_PREFIX + : Type.PHRASE; + } else { + type = Type.BOOLEAN; + } + return createFieldQuery(ts, type, BooleanClause.Occur.MUST, field, 0); + } + }; + queryPos = newGraphSynonymQuery(queries); + } else { + Term[] terms = graph.getTerms(field, start); + assert terms.length > 0; + if (terms.length == 1) { + queryPos = usePrefix ? newPrefixQuery(field, terms[0]) : newTermQuery(terms[0]); + } else { + // We don't apply prefix on synonyms + queryPos = newSynonymQuery(terms); + } + } + if (queryPos != null) { + builder.add(queryPos, operator); + } + } + return builder.build(); + } + private Query analyzeGraphPhrase(TokenStream source, String field, Type type, int slop) throws IOException { assert type == Type.PHRASE_PREFIX || type == Type.PHRASE; @@ -615,13 +775,13 @@ private Query analyzeGraphPhrase(TokenStream source, String field, Type type, in } lastState = end; final SpanQuery queryPos; - boolean endPrefix = end == -1 && type == Type.PHRASE_PREFIX; + boolean usePrefix = end == -1 && type == Type.PHRASE_PREFIX; if (graph.hasSidePath(start)) { List queries = new ArrayList<>(); Iterator it = graph.getFiniteStrings(start, end); while (it.hasNext()) { TokenStream ts = it.next(); - SpanQuery q = createSpanQuery(ts, field, endPrefix); + SpanQuery q = createSpanQuery(ts, field, usePrefix); if (q != null) { if (queries.size() >= maxClauseCount) { throw new BooleanQuery.TooManyClauses(); @@ -640,7 +800,7 @@ private Query analyzeGraphPhrase(TokenStream source, String field, Type type, in if (terms.length >= maxClauseCount) { throw new BooleanQuery.TooManyClauses(); } - queryPos = newSpanQuery(terms, endPrefix); + queryPos = newSpanQuery(terms, usePrefix); } if (queryPos != null) { diff --git a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java index 88fd5293392b5..667d3a3823db8 100644 --- a/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java +++ b/server/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java @@ -66,6 +66,7 @@ public Query parse(MultiMatchQueryBuilder.Type type, Map fieldNam case PHRASE_PREFIX: case BEST_FIELDS: case MOST_FIELDS: + case BOOL_PREFIX: queries = buildFieldQueries(type, fieldNames, value, minimumShouldMatch); break; @@ -179,10 +180,23 @@ protected Query newSynonymQuery(Term[] terms) { } @Override - public Query newTermQuery(Term term) { + protected Query newTermQuery(Term term) { return blendTerm(context, term.bytes(), commonTermsCutoff, tieBreaker, lenient, blendedFields); } + @Override + protected Query newPrefixQuery(String field, Term term) { + List disjunctions = new ArrayList<>(); + for (FieldAndBoost fieldType : blendedFields) { + Query query = fieldType.fieldType.prefixQuery(term.text(), null, context); + if (fieldType.boost != 1f) { + query = new BoostQuery(query, fieldType.boost); + } + disjunctions.add(query); + } + return new DisjunctionMaxQuery(disjunctions, tieBreaker); + } + @Override protected Query analyzePhrase(String field, TokenStream stream, int slop) throws IOException { List disjunctions = new ArrayList<>(); diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 4efac334bde17..97d1939c1b292 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -2493,8 +2493,9 @@ private EngineConfig newEngineConfig() { Sort indexSort = 
indexSortSupplier.get(); return new EngineConfig(shardId, shardRouting.allocationId().getId(), threadPool, indexSettings, warmer, store, indexSettings.getMergePolicy(), - mapperService.indexAnalyzer(), similarityService.similarity(mapperService), codecService, shardEventListener, - indexCache.query(), cachingPolicy, translogConfig, + mapperService != null ? mapperService.indexAnalyzer() : null, + similarityService.similarity(mapperService), codecService, shardEventListener, + indexCache != null ? indexCache.query() : null, cachingPolicy, translogConfig, IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.get(indexSettings.getSettings()), Collections.singletonList(refreshListeners), Collections.singletonList(new RefreshMetricUpdater(refreshMetric)), @@ -3077,7 +3078,9 @@ public void afterRefresh(boolean didRefresh) throws IOException { private EngineConfig.TombstoneDocSupplier tombstoneDocSupplier() { final RootObjectMapper.Builder noopRootMapper = new RootObjectMapper.Builder("__noop"); - final DocumentMapper noopDocumentMapper = new DocumentMapper.Builder(noopRootMapper, mapperService).build(mapperService); + final DocumentMapper noopDocumentMapper = mapperService != null ? + new DocumentMapper.Builder(noopRootMapper, mapperService).build(mapperService) : + null; return new EngineConfig.TombstoneDocSupplier() { @Override public ParsedDocument newDeleteTombstoneDoc(String type, String id) { diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index 4a12bdae6b9ea..913fb47157eda 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -156,6 +156,8 @@ import static java.util.Collections.unmodifiableMap; import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder; import static org.elasticsearch.common.util.CollectionUtils.arrayAsArrayList; +import static org.elasticsearch.index.IndexService.IndexCreationContext.CREATE_INDEX; +import static org.elasticsearch.index.IndexService.IndexCreationContext.META_DATA_VERIFICATION; import static org.elasticsearch.index.query.AbstractQueryBuilder.parseInnerQueryBuilder; public class IndicesService extends AbstractLifecycleComponent @@ -491,7 +493,7 @@ public void onStoreClosed(ShardId shardId) { finalListeners.add(oldShardsStats); final IndexService indexService = createIndexService( - "create index", + CREATE_INDEX, indexMetaData, indicesQueryCache, indicesFieldDataCache, @@ -513,7 +515,7 @@ public void onStoreClosed(ShardId shardId) { /** * This creates a new IndexService without registering it */ - private synchronized IndexService createIndexService(final String reason, + private synchronized IndexService createIndexService(IndexService.IndexCreationContext indexCreationContext, IndexMetaData indexMetaData, IndicesQueryCache indicesQueryCache, IndicesFieldDataCache indicesFieldDataCache, @@ -526,7 +528,7 @@ private synchronized IndexService createIndexService(final String reason, indexMetaData.getIndex(), idxSettings.getNumberOfShards(), idxSettings.getNumberOfReplicas(), - reason); + indexCreationContext); final IndexModule indexModule = new IndexModule(idxSettings, analysisRegistry, getEngineFactory(idxSettings), indexStoreFactories); for (IndexingOperationListener operationListener : indexingOperationListeners) { @@ -537,6 +539,7 @@ private synchronized IndexService createIndexService(final String reason, 
indexModule.addIndexEventListener(listener); } return indexModule.newIndexService( + indexCreationContext, nodeEnv, xContentRegistry, this, @@ -615,7 +618,7 @@ public synchronized void verifyIndexMetadata(IndexMetaData metaData, IndexMetaDa closeables.add(indicesQueryCache); // this will also fail if some plugin fails etc. which is nice since we can verify that early final IndexService service = - createIndexService("metadata verification", metaData, indicesQueryCache, indicesFieldDataCache, emptyList()); + createIndexService(META_DATA_VERIFICATION, metaData, indicesQueryCache, indicesFieldDataCache, emptyList()); closeables.add(() -> service.close("metadata verification", false)); service.mapperService().merge(metaData, MapperService.MergeReason.MAPPING_RECOVERY); if (metaData.equals(metaDataUpdate) == false) { diff --git a/server/src/main/java/org/elasticsearch/ingest/Pipeline.java b/server/src/main/java/org/elasticsearch/ingest/Pipeline.java index fc5311be5cbde..218713383227e 100644 --- a/server/src/main/java/org/elasticsearch/ingest/Pipeline.java +++ b/server/src/main/java/org/elasticsearch/ingest/Pipeline.java @@ -89,6 +89,9 @@ public static Pipeline create(String id, Map config, /** * Modifies the data of a document to be indexed based on the processor this pipeline holds + * + * If null is returned then this document will be dropped and not indexed, otherwise + * this document will be kept and indexed. */ public IngestDocument execute(IngestDocument ingestDocument) throws Exception { long startTimeInNanos = relativeTimeProvider.getAsLong(); diff --git a/server/src/main/java/org/elasticsearch/ingest/Processor.java b/server/src/main/java/org/elasticsearch/ingest/Processor.java index 92b08bba77bf7..c064ddb35a129 100644 --- a/server/src/main/java/org/elasticsearch/ingest/Processor.java +++ b/server/src/main/java/org/elasticsearch/ingest/Processor.java @@ -39,6 +39,9 @@ public interface Processor { /** * Introspect and potentially modify the incoming data. + * + * @return If null is returned then the current document will be dropped and not be indexed, + * otherwise this document will be kept and indexed */ IngestDocument execute(IngestDocument ingestDocument) throws Exception; diff --git a/server/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java b/server/src/main/java/org/elasticsearch/rest/action/cat/RestCatRecoveryAction.java similarity index 89% rename from server/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java rename to server/src/main/java/org/elasticsearch/rest/action/cat/RestCatRecoveryAction.java index a66741c2d9411..0cea93e4e7ee7 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/cat/RestCatRecoveryAction.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.Table; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentElasticsearchExtension; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; @@ -47,8 +48,8 @@ * in a string format, designed to be used at the command line. An Index can * be specified to limit output to a particular index or indices. 
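A sketch of the null-means-drop contract documented for Pipeline and Processor above; the processor class, its type string, and the field it checks are invented for illustration:

    import org.elasticsearch.ingest.IngestDocument;
    import org.elasticsearch.ingest.Processor;

    public final class DropIfMissingProcessor implements Processor {
        @Override
        public IngestDocument execute(IngestDocument ingestDocument) {
            // returning null tells the pipeline to drop the document instead of indexing it
            return ingestDocument.hasField("user_id") ? ingestDocument : null;
        }

        @Override
        public String getType() {
            return "drop_if_missing";
        }

        @Override
        public String getTag() {
            return null;
        }
    }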
*/ -public class RestRecoveryAction extends AbstractCatAction { - public RestRecoveryAction(Settings settings, RestController restController) { +public class RestCatRecoveryAction extends AbstractCatAction { + public RestCatRecoveryAction(Settings settings, RestController restController) { super(settings); restController.registerHandler(GET, "/_cat/recovery", this); restController.registerHandler(GET, "/_cat/recovery/{index}", this); @@ -86,6 +87,10 @@ protected Table getTableWithHeader(RestRequest request) { t.startHeaders() .addCell("index", "alias:i,idx;desc:index name") .addCell("shard", "alias:s,sh;desc:shard name") + .addCell("start_time", "default:false;alias:start;desc:recovery start time") + .addCell("start_time_millis", "default:false;alias:start_millis;desc:recovery start time in epoch milliseconds") + .addCell("stop_time", "default:false;alias:stop;desc:recovery stop time") + .addCell("stop_time_millis", "default:false;alias:stop_millis;desc:recovery stop time in epoch milliseconds") .addCell("time", "alias:t,ti;desc:recovery time") .addCell("type", "alias:ty;desc:recovery type") .addCell("stage", "alias:st;desc:recovery stage") @@ -149,6 +154,10 @@ public int compare(RecoveryState o1, RecoveryState o2) { t.startRow(); t.addCell(index); t.addCell(state.getShardId().id()); + t.addCell(XContentElasticsearchExtension.DEFAULT_DATE_PRINTER.print(state.getTimer().startTime())); + t.addCell(state.getTimer().startTime()); + t.addCell(XContentElasticsearchExtension.DEFAULT_DATE_PRINTER.print(state.getTimer().stopTime())); + t.addCell(state.getTimer().stopTime()); t.addCell(new TimeValue(state.getTimer().time())); t.addCell(state.getRecoverySource().getType().toString().toLowerCase(Locale.ROOT)); t.addCell(state.getStage().toString().toLowerCase(Locale.ROOT)); diff --git a/server/src/main/java/org/elasticsearch/script/ScoreScript.java b/server/src/main/java/org/elasticsearch/script/ScoreScript.java index 6ac5935826bf7..f31af4c008c74 100644 --- a/server/src/main/java/org/elasticsearch/script/ScoreScript.java +++ b/server/src/main/java/org/elasticsearch/script/ScoreScript.java @@ -62,6 +62,11 @@ public abstract class ScoreScript { private DoubleSupplier scoreSupplier = () -> 0.0; + private final int docBase; + private int docId; + private int shardId = -1; + private String indexName = null; + public ScoreScript(Map params, SearchLookup lookup, LeafReaderContext leafContext) { // null check needed b/c of expression engine subclass if (lookup == null) { @@ -69,11 +74,13 @@ public ScoreScript(Map params, SearchLookup lookup, LeafReaderCo assert leafContext == null; this.params = null; this.leafLookup = null; + this.docBase = 0; } else { this.leafLookup = lookup.getLeafSearchLookup(leafContext); params = new HashMap<>(params); params.putAll(leafLookup.asMap()); this.params = new DeprecationMap(params, DEPRECATIONS, "score-script"); + this.docBase = leafContext.docBase; } } @@ -91,6 +98,7 @@ public final Map> getDoc() { /** Set the current document to run the script on next. */ public void setDocument(int docid) { + this.docId = docid; leafLookup.setDocument(docid); } @@ -104,10 +112,74 @@ public void setScorer(Scorable scorer) { }; } + /** + * Accessed as _score in the painless script + * @return the score of the inner query + */ public double get_score() { return scoreSupplier.getAsDouble(); } + + /** + * Starting a name with underscore, so that the user cannot access this function directly through a script + * It is only used within predefined painless functions. 
+ * @return the internal document ID + */ + public int _getDocId() { + return docId; + } + + /** + * Starting a name with underscore, so that the user cannot access this function directly through a script + * It is only used within predefined painless functions. + * @return the internal document ID with the base + */ + public int _getDocBaseId() { + return docBase + docId; + } + + /** + * Starting a name with underscore, so that the user cannot access this function directly through a script + * It is only used within predefined painless functions. + * @return shard id or throws an exception if shard is not set up for this script instance + */ + public int _getShardId() { + if (shardId > -1) { + return shardId; + } else { + throw new IllegalArgumentException("shard id can not be looked up!"); + } + } + + /** + * Starting a name with underscore, so that the user cannot access this function directly through a script + * It is only used within predefined painless functions. + * @return index name or throws an exception if the index name is not set up for this script instance + */ + public String _getIndex() { + if (indexName != null) { + return indexName; + } else { + throw new IllegalArgumentException("index name can not be looked up!"); + } + } + + /** + * Starting a name with underscore, so that the user cannot access this function directly through a script + */ + public void _setShard(int shardId) { + this.shardId = shardId; + } + + /** + * Starting a name with underscore, so that the user cannot access this function directly through a script + */ + public void _setIndexName(String indexName) { + this.indexName = indexName; + } + + /** A factory to construct {@link ScoreScript} instances. */ public interface LeafFactory { diff --git a/server/src/main/java/org/elasticsearch/script/ScoreScriptUtils.java b/server/src/main/java/org/elasticsearch/script/ScoreScriptUtils.java index 273b8fcf8559d..c7d6e889397ff 100644 --- a/server/src/main/java/org/elasticsearch/script/ScoreScriptUtils.java +++ b/server/src/main/java/org/elasticsearch/script/ScoreScriptUtils.java @@ -21,22 +21,20 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.StringHelper; -import org.elasticsearch.common.Randomness; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.time.DateMathParser; import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.index.mapper.DateFieldMapper; import java.time.ZoneId; -import java.util.Random; -/** - * ScoringScriptImpl can be used as {@link ScoreScript} - * to run a previously compiled Painless script. 
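Both random-score helpers added just below reduce to the same seeded-hash mapping onto [0.0, 1.0); a standalone sketch of that mapping, with the salt derived from the index name and shard id as in this change:

    import org.apache.lucene.util.BytesRef;
    import org.apache.lucene.util.StringHelper;
    import static com.carrotsearch.hppc.BitMixer.mix32;

    final class RandomScoreSketch {
        static double randomScore(String seedValue, int seed, String indexName, int shardId) {
            int salt = (indexName.hashCode() << 10) | shardId;   // per index/shard salt
            int hash = StringHelper.murmurhash3_x86_32(new BytesRef(seedValue), mix32(salt ^ seed));
            return (hash & 0x00FFFFFF) / (float) (1 << 24);      // lower 24 bits -> float in [0.0, 1.0)
        }
    }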
- */ +import static com.carrotsearch.hppc.BitMixer.mix32; + public final class ScoreScriptUtils { /****** STATIC FUNCTIONS that can be used by users for score calculations **/ @@ -53,26 +51,50 @@ public static double sigmoid(double value, double k, double a){ return Math.pow(value,a) / (Math.pow(k,a) + Math.pow(value,a)); } + // random score based on the documents' values of the given field + public static final class RandomScoreField { + private final ScoreScript scoreScript; + private final ScriptDocValues docValues; + private final int saltedSeed; - // reproducible random - public static double randomReproducible(String seedValue, int seed) { - int hash = StringHelper.murmurhash3_x86_32(new BytesRef(seedValue), seed); - return (hash & 0x00FFFFFF) / (float)(1 << 24); // only use the lower 24 bits to construct a float from 0.0-1.0 - } - // not reproducible random - public static final class RandomNotReproducible { - private final Random rnd; + public RandomScoreField(ScoreScript scoreScript, int seed, String fieldName) { + this.scoreScript = scoreScript; + this.docValues = scoreScript.getDoc().get(fieldName); + int salt = (scoreScript._getIndex().hashCode() << 10) | scoreScript._getShardId(); + this.saltedSeed = mix32(salt ^ seed); - public RandomNotReproducible() { - this.rnd = Randomness.get(); } - public double randomNotReproducible() { - return rnd.nextDouble(); + public double randomScore() { + try { + docValues.setNextDocId(scoreScript._getDocId()); + String seedValue = String.valueOf(docValues.get(0)); + int hash = StringHelper.murmurhash3_x86_32(new BytesRef(seedValue), saltedSeed); + return (hash & 0x00FFFFFF) / (float)(1 << 24); // only use the lower 24 bits to construct a float from 0.0-1.0 + } catch (Exception e) { + throw ExceptionsHelper.convertToElastic(e); + } } } + // random score based on the internal Lucene document Ids + public static final class RandomScoreDoc { + private final ScoreScript scoreScript; + private final int saltedSeed; + + public RandomScoreDoc(ScoreScript scoreScript, int seed) { + this.scoreScript = scoreScript; + int salt = (scoreScript._getIndex().hashCode() << 10) | scoreScript._getShardId(); + this.saltedSeed = mix32(salt ^ seed); + } + + public double randomScore() { + String seedValue = Integer.toString(scoreScript._getDocBaseId()); + int hash = StringHelper.murmurhash3_x86_32(new BytesRef(seedValue), saltedSeed); + return (hash & 0x00FFFFFF) / (float)(1 << 24); // only use the lower 24 bits to construct a float from 0.0-1.0 + } + } // **** Decay functions on geo field public static final class DecayGeoLinear { diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index de4f548f6cf08..4c6ba07c631af 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.xcontent.ParseFieldRegistry; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.MatchBoolPrefixQueryBuilder; import org.elasticsearch.index.query.BoostingQueryBuilder; import org.elasticsearch.index.query.CommonTermsQueryBuilder; import org.elasticsearch.index.query.ConstantScoreQueryBuilder; @@ -786,6 +787,8 @@ private void registerQueryParsers(List plugins) { registerQuery(new QuerySpec<>(IntervalQueryBuilder.NAME, IntervalQueryBuilder::new, 
IntervalQueryBuilder::fromXContent)); registerQuery(new QuerySpec<>(DistanceFeatureQueryBuilder.NAME, DistanceFeatureQueryBuilder::new, DistanceFeatureQueryBuilder::fromXContent)); + registerQuery( + new QuerySpec<>(MatchBoolPrefixQueryBuilder.NAME, MatchBoolPrefixQueryBuilder::new, MatchBoolPrefixQueryBuilder::fromXContent)); if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) { registerQuery(new QuerySpec<>(GeoShapeQueryBuilder.NAME, GeoShapeQueryBuilder::new, GeoShapeQueryBuilder::fromXContent)); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java index 4c614626d1d1b..8cf5ee7a41de0 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java @@ -132,7 +132,6 @@ public void execute(SearchContext context) { throw new AggregationExecutionException("Failed to build aggregation [" + aggregator.name() + "]", e); } } - context.queryResult().aggregations(new InternalAggregations(aggregations)); List pipelineAggregators = context.aggregations().factories().createPipelineAggregators(); List siblingPipelineAggregators = new ArrayList<>(pipelineAggregators.size()); for (PipelineAggregator pipelineAggregator : pipelineAggregators) { @@ -144,7 +143,7 @@ public void execute(SearchContext context) { + "allowed at the top level"); } } - context.queryResult().pipelineAggregators(siblingPipelineAggregators); + context.queryResult().aggregations(new InternalAggregations(aggregations, siblingPipelineAggregators)); // disable aggregations so that they don't run on next pages in case of scrolling context.aggregations(null); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java b/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java index 187f5e3864ed1..8910ca25c337d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java @@ -77,7 +77,7 @@ public InternalAggregations(List aggregations, List getTopLevelPipelineAggregators() { + public List getTopLevelPipelineAggregators() { return topLevelPipelineAggregators; } @@ -91,20 +91,7 @@ public static InternalAggregations reduce(List aggregation if (aggregationsList.isEmpty()) { return null; } - InternalAggregations first = aggregationsList.get(0); - return reduce(aggregationsList, first.topLevelPipelineAggregators, context); - } - - /** - * Reduces the given list of aggregations as well as the provided top-level pipeline aggregators. - * Note that top-level pipeline aggregators are reduced only as part of the final reduction phase, otherwise they are left untouched. 
- */ - public static InternalAggregations reduce(List aggregationsList, - List topLevelPipelineAggregators, - ReduceContext context) { - if (aggregationsList.isEmpty()) { - return null; - } + List topLevelPipelineAggregators = aggregationsList.get(0).getTopLevelPipelineAggregators(); // first we collect all aggregations of the same type and list them together Map> aggByName = new HashMap<>(); diff --git a/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java b/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java index 34d3508f6bab5..9f9a2c2680a1f 100644 --- a/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java +++ b/server/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java @@ -21,6 +21,7 @@ import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.TotalHits; +import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; @@ -28,6 +29,7 @@ import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator; @@ -37,7 +39,6 @@ import java.io.IOException; import java.util.Collections; import java.util.List; -import java.util.Objects; import java.util.stream.Collectors; import static org.elasticsearch.common.lucene.Lucene.readTopDocs; @@ -54,7 +55,6 @@ public final class QuerySearchResult extends SearchPhaseResult { private DocValueFormat[] sortValueFormats; private InternalAggregations aggregations; private boolean hasAggs; - private List pipelineAggregators = Collections.emptyList(); private Suggest suggest; private boolean searchTimedOut; private Boolean terminatedEarly = null; @@ -198,14 +198,6 @@ public void profileResults(ProfileShardResult shardResults) { hasProfileResults = shardResults != null; } - public List pipelineAggregators() { - return pipelineAggregators; - } - - public void pipelineAggregators(List pipelineAggregators) { - this.pipelineAggregators = Objects.requireNonNull(pipelineAggregators); - } - public Suggest suggest() { return suggest; } @@ -294,8 +286,18 @@ public void readFromWithId(long id, StreamInput in) throws IOException { if (hasAggs = in.readBoolean()) { aggregations = InternalAggregations.readAggregations(in); } - pipelineAggregators = in.readNamedWriteableList(PipelineAggregator.class).stream().map(a -> (SiblingPipelineAggregator) a) - .collect(Collectors.toList()); + if (in.getVersion().before(Version.V_7_1_0)) { + List pipelineAggregators = in.readNamedWriteableList(PipelineAggregator.class).stream() + .map(a -> (SiblingPipelineAggregator) a).collect(Collectors.toList()); + if (hasAggs && pipelineAggregators.isEmpty() == false) { + List internalAggs = aggregations.asList().stream() + .map(agg -> (InternalAggregation) agg).collect(Collectors.toList()); + //Earlier versions serialize sibling pipeline aggs separately as they used to be set to QuerySearchResult directly, while + //later versions include them in InternalAggregations. 
Note that even though serializing sibling pipeline aggs as part of + //InternalAggregations has been supported since 6.7.0, the shards set sibling pipeline aggs to InternalAggregations only from 7.1. + this.aggregations = new InternalAggregations(internalAggs, pipelineAggregators); + } + } if (in.readBoolean()) { suggest = new Suggest(in); } @@ -332,7 +334,16 @@ public void writeToNoId(StreamOutput out) throws IOException { out.writeBoolean(true); aggregations.writeTo(out); } - out.writeNamedWriteableList(pipelineAggregators); + if (out.getVersion().before(Version.V_7_1_0)) { + //Earlier versions expect sibling pipeline aggs separately as they used to be set to QuerySearchResult directly, + //while later versions expect them in InternalAggregations. Note that even though serializing sibling pipeline aggs as part of + //InternalAggregations has been supported since 6.7.0, the shards set sibling pipeline aggs to InternalAggregations only from 7.1 on. + if (aggregations == null) { + out.writeNamedWriteableList(Collections.emptyList()); + } else { + out.writeNamedWriteableList(aggregations.getTopLevelPipelineAggregators()); + } + } if (suggest == null) { out.writeBoolean(false); } else { diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java index 1bf47d1a42f94..859a96e784c33 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java @@ -145,8 +145,6 @@ public String getKey(final String key) { /** * A proxy address for the remote cluster. - * NOTE: this settings is undocumented until we have at last one transport that supports passing - on the hostname via a mechanism like SNI.
*/ public static final Setting.AffixSetting REMOTE_CLUSTERS_PROXY = Setting.affixKeySetting( "cluster.remote.", diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java index a13e8af919b2a..b3ecc59076759 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportBulkActionIngestTests.java @@ -50,7 +50,6 @@ import org.elasticsearch.ingest.IngestService; import org.elasticsearch.tasks.Task; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportResponseHandler; import org.elasticsearch.transport.TransportService; import org.junit.Before; @@ -156,15 +155,8 @@ void createIndex(String index, TimeValue timeout, ActionListener { TestSingleItemBulkWriteAction(TestTransportBulkAction bulkAction) { - super(SETTINGS, IndexAction.NAME, TransportBulkActionIngestTests.this.transportService, - TransportBulkActionIngestTests.this.clusterService, - null, null, null, new ActionFilters(Collections.emptySet()), null, - IndexRequest::new, IndexRequest::new, ThreadPool.Names.WRITE, bulkAction, null); - } - - @Override - protected IndexResponse newResponseInstance() { - return new IndexResponse(); + super(IndexAction.NAME, TransportBulkActionIngestTests.this.transportService, + new ActionFilters(Collections.emptySet()), IndexRequest::new, bulkAction); } } diff --git a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java index 8fa8060b38536..610a72de6ecfd 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/TransportShardBulkActionTests.java @@ -550,7 +550,7 @@ public void testUpdateRequestWithConflictFailure() throws Exception { IndexRequest updateResponse = new IndexRequest("index", "_doc", "id").source(Requests.INDEX_CONTENT_TYPE, "field", "value"); - Exception err = new VersionConflictEngineException(shardId, "_doc", "id", + Exception err = new VersionConflictEngineException(shardId, "id", "I'm conflicted <(;_;)>"); Engine.IndexResult indexResult = new Engine.IndexResult(err, 0, 0, 0); IndexShard shard = mock(IndexShard.class); @@ -784,7 +784,7 @@ public void testRetries() throws Exception { IndexRequest updateResponse = new IndexRequest("index", "_doc", "id").source(Requests.INDEX_CONTENT_TYPE, "field", "value"); - Exception err = new VersionConflictEngineException(shardId, "_doc", "id", + Exception err = new VersionConflictEngineException(shardId, "id", "I'm conflicted <(;_;)>"); Engine.IndexResult conflictedResult = new Engine.IndexResult(err, 0, 0); Engine.IndexResult mappingUpdate = diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java index ec9681a7b62fb..ab31c44ff2d59 100644 --- a/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/SearchResponseMergerTests.java @@ -396,7 +396,6 @@ public void testMergeAggs() throws InterruptedException { assertEquals(totalCount, bucket.getDocCount()); } - @AwaitsFix(bugUrl = 
"https://github.com/elastic/elasticsearch/issues/40214") public void testMergeSearchHits() throws InterruptedException { final long currentRelativeTime = randomLong(); final SearchTimeProvider timeProvider = new SearchTimeProvider(randomLong(), 0, () -> currentRelativeTime); @@ -442,6 +441,7 @@ public void testMergeSearchHits() throws InterruptedException { float expectedMaxScore = Float.NEGATIVE_INFINITY; int numIndices = requestedSize == 0 ? 0 : randomIntBetween(1, requestedSize); Iterator> indicesIterator = randomRealisticIndices(numIndices, numResponses).entrySet().iterator(); + boolean hasHits = false; for (int i = 0; i < numResponses; i++) { Map.Entry entry = indicesIterator.next(); String clusterAlias = entry.getKey(); @@ -465,6 +465,7 @@ public void testMergeSearchHits() throws InterruptedException { float maxScore = scoreSort ? numDocs * scoreFactor : Float.NaN; SearchHit[] hits = randomSearchHitArray(numDocs, numResponses, clusterAlias, indices, maxScore, scoreFactor, sortFields, priorityQueue); + hasHits |= hits.length > 0; expectedMaxScore = Math.max(expectedMaxScore, maxScore); Object[] collapseValues = null; @@ -513,8 +514,14 @@ public void testMergeSearchHits() throws InterruptedException { assertNull(searchResponse.getScrollId()); SearchHits searchHits = searchResponse.getHits(); - assertArrayEquals(sortFields, searchHits.getSortFields()); - assertEquals(collapseField, searchHits.getCollapseField()); + // the sort fields and the collapse field are not returned when hits are empty + if (hasHits) { + assertArrayEquals(sortFields, searchHits.getSortFields()); + assertEquals(collapseField, searchHits.getCollapseField()); + } else { + assertNull(searchHits.getSortFields()); + assertNull(searchHits.getCollapseField()); + } if (expectedTotalHits == null) { assertNull(searchHits.getTotalHits()); } else { @@ -532,7 +539,9 @@ public void testMergeSearchHits() throws InterruptedException { priorityQueue.poll(); } SearchHit[] hits = searchHits.getHits(); - if (collapseField != null) { + if (collapseField != null + // the collapse field is not returned when hits are empty + && hasHits) { assertEquals(hits.length, searchHits.getCollapseValues().length); } else { assertNull(searchHits.getCollapseValues()); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterBootstrapServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterBootstrapServiceTests.java index a2dee54b3c6fc..4dd3e3e33c39f 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterBootstrapServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/ClusterBootstrapServiceTests.java @@ -23,6 +23,8 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode.Role; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.DiscoveryModule; +import org.elasticsearch.node.Node; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransport; import org.elasticsearch.transport.TransportRequest; @@ -461,4 +463,52 @@ public void testDoesNotIncludeExtraNodes() { deterministicTaskQueue.runAllTasks(); assertTrue(bootstrapped.get()); } + + public void testBootstrapsAutomaticallyWithSingleNodeDiscovery() { + final Settings.Builder settings = Settings.builder() + .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE) + .put(NODE_NAME_SETTING.getKey(), localNode.getName()); + final AtomicBoolean 
bootstrapped = new AtomicBoolean(); + + ClusterBootstrapService clusterBootstrapService = new ClusterBootstrapService(settings.build(), + transportService, () -> emptyList(), () -> false, vc -> { + assertTrue(bootstrapped.compareAndSet(false, true)); + assertThat(vc.getNodeIds(), hasSize(1)); + assertThat(vc.getNodeIds(), hasItem(localNode.getId())); + assertTrue(vc.hasQuorum(singletonList(localNode.getId()))); + }); + + transportService.start(); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + assertTrue(bootstrapped.get()); + + bootstrapped.set(false); + clusterBootstrapService.onFoundPeersUpdated(); + deterministicTaskQueue.runAllTasks(); + assertFalse(bootstrapped.get()); // should only bootstrap once + } + + public void testFailBootstrapWithBothSingleNodeDiscoveryAndInitialMasterNodes() { + final Settings.Builder settings = Settings.builder() + .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE) + .put(NODE_NAME_SETTING.getKey(), localNode.getName()) + .put(INITIAL_MASTER_NODES_SETTING.getKey(), "test"); + + assertThat(expectThrows(IllegalArgumentException.class, () -> new ClusterBootstrapService(settings.build(), + transportService, () -> emptyList(), () -> false, vc -> fail())).getMessage(), + containsString("setting [" + INITIAL_MASTER_NODES_SETTING.getKey() + "] is not allowed when [discovery.type] is set " + + "to [single-node]")); + } + + public void testFailBootstrapNonMasterEligibleNodeWithSingleNodeDiscovery() { + final Settings.Builder settings = Settings.builder() + .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE) + .put(NODE_NAME_SETTING.getKey(), localNode.getName()) + .put(Node.NODE_MASTER_SETTING.getKey(), false); + + assertThat(expectThrows(IllegalArgumentException.class, () -> new ClusterBootstrapService(settings.build(), + transportService, () -> emptyList(), () -> false, vc -> fail())).getMessage(), + containsString("node with [discovery.type] set to [single-node] must be master-eligible")); + } } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index 07ae6679e4489..6f078217e4f45 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -63,6 +63,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor; import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.SeedHostsProvider.HostsResolver; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.gateway.MetaStateService; @@ -204,7 +205,7 @@ public void testCanUpdateClusterStateAfterStabilisation() { } public void testDoesNotElectNonMasterNode() { - final Cluster cluster = new Cluster(randomIntBetween(1, 5), false); + final Cluster cluster = new Cluster(randomIntBetween(1, 5), false, Settings.EMPTY); cluster.runRandomly(); cluster.stabilise(); @@ -904,7 +905,7 @@ public void testIncompatibleDiffResendsFullState() { * and join the leader again. 
*/ public void testStayCandidateAfterReceivingFollowerCheckFromKnownMaster() { - final Cluster cluster = new Cluster(2, false); + final Cluster cluster = new Cluster(2, false, Settings.EMPTY); cluster.runRandomly(); cluster.stabilise(); @@ -1029,7 +1030,7 @@ public void testCannotJoinClusterWithDifferentUUID() throws IllegalAccessExcepti final ClusterNode shiftedNode = randomFrom(cluster2.clusterNodes).restartedNode(); final ClusterNode newNode = cluster1.new ClusterNode(nextNodeIndex.getAndIncrement(), - shiftedNode.getLocalNode(), n -> shiftedNode.persistedState); + shiftedNode.getLocalNode(), n -> shiftedNode.persistedState, shiftedNode.nodeSettings); cluster1.clusterNodes.add(newNode); MockLogAppender mockAppender = new MockLogAppender(); @@ -1053,7 +1054,7 @@ public void testCannotJoinClusterWithDifferentUUID() throws IllegalAccessExcepti final ClusterNode detachedNode = newNode.restartedNode( metaData -> DetachClusterCommand.updateMetaData(metaData), - term -> DetachClusterCommand.updateCurrentTerm()); + term -> DetachClusterCommand.updateCurrentTerm(), newNode.nodeSettings); cluster1.clusterNodes.replaceAll(cn -> cn == newNode ? detachedNode : cn); cluster1.stabilise(); } @@ -1111,6 +1112,43 @@ public void testFollowerRemovedIfUnableToSendRequestsToMaster() { + DEFAULT_CLUSTER_STATE_UPDATE_DELAY); } + public void testSingleNodeDiscoveryWithoutQuorum() { + final Cluster cluster = new Cluster(3); + cluster.runRandomly(); + cluster.stabilise(); + + final ClusterNode clusterNode = cluster.getAnyNode(); + logger.debug("rebooting [{}]", clusterNode.getId()); + clusterNode.close(); + cluster.clusterNodes.forEach( + cn -> cluster.deterministicTaskQueue.scheduleNow(cn.onNode( + new Runnable() { + @Override + public void run() { + cn.transportService.disconnectFromNode(clusterNode.getLocalNode()); + } + + @Override + public String toString() { + return "disconnect from " + clusterNode.getLocalNode() + " after shutdown"; + } + }))); + IllegalStateException ise = expectThrows(IllegalStateException.class, + () -> cluster.clusterNodes.replaceAll(cn -> cn == clusterNode ? 
+ cn.restartedNode(Function.identity(), Function.identity(), Settings.builder() + .put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE).build()) : + cn)); + assertThat(ise.getMessage(), containsString("cannot start with [discovery.type] set to [single-node] when local node")); + assertThat(ise.getMessage(), containsString("does not have quorum in voting configuration")); + } + + public void testSingleNodeDiscoveryWithQuorum() { + final Cluster cluster = new Cluster(1, randomBoolean(), Settings.builder().put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), + DiscoveryModule.SINGLE_NODE_DISCOVERY_TYPE).build()); + cluster.runRandomly(); + cluster.stabilise(); + } + private static long defaultMillis(Setting setting) { return setting.get(Settings.EMPTY).millis() + Cluster.DEFAULT_DELAY_VARIABILITY; } @@ -1184,10 +1222,10 @@ class Cluster { private List seedHostsList; Cluster(int initialNodeCount) { - this(initialNodeCount, true); + this(initialNodeCount, true, Settings.EMPTY); } - Cluster(int initialNodeCount, boolean allNodesMasterEligible) { + Cluster(int initialNodeCount, boolean allNodesMasterEligible, Settings nodeSettings) { deterministicTaskQueue.setExecutionDelayVariabilityMillis(DEFAULT_DELAY_VARIABILITY); assertThat(initialNodeCount, greaterThan(0)); @@ -1196,7 +1234,7 @@ class Cluster { clusterNodes = new ArrayList<>(initialNodeCount); for (int i = 0; i < initialNodeCount; i++) { final ClusterNode clusterNode = new ClusterNode(nextNodeIndex.getAndIncrement(), - allNodesMasterEligible || i == 0 || randomBoolean()); + allNodesMasterEligible || i == 0 || randomBoolean(), nodeSettings); clusterNodes.add(clusterNode); if (clusterNode.getLocalNode().isMasterNode()) { masterEligibleNodeIds.add(clusterNode.getId()); @@ -1229,7 +1267,7 @@ List addNodes(int newNodesCount) { final List addedNodes = new ArrayList<>(); for (int i = 0; i < newNodesCount; i++) { - final ClusterNode clusterNode = new ClusterNode(nextNodeIndex.getAndIncrement(), true); + final ClusterNode clusterNode = new ClusterNode(nextNodeIndex.getAndIncrement(), true, Settings.EMPTY); addedNodes.add(clusterNode); } clusterNodes.addAll(addedNodes); @@ -1701,6 +1739,7 @@ class ClusterNode { private Coordinator coordinator; private final DiscoveryNode localNode; private final MockPersistedState persistedState; + private final Settings nodeSettings; private AckedFakeThreadPoolMasterService masterService; private DisruptableClusterApplierService clusterApplierService; private ClusterService clusterService; @@ -1708,13 +1747,15 @@ class ClusterNode { private DisruptableMockTransport mockTransport; private List> extraJoinValidators = new ArrayList<>(); - ClusterNode(int nodeIndex, boolean masterEligible) { - this(nodeIndex, createDiscoveryNode(nodeIndex, masterEligible), defaultPersistedStateSupplier); + ClusterNode(int nodeIndex, boolean masterEligible, Settings nodeSettings) { + this(nodeIndex, createDiscoveryNode(nodeIndex, masterEligible), defaultPersistedStateSupplier, nodeSettings); } - ClusterNode(int nodeIndex, DiscoveryNode localNode, Function persistedStateSupplier) { + ClusterNode(int nodeIndex, DiscoveryNode localNode, Function persistedStateSupplier, + Settings nodeSettings) { this.nodeIndex = nodeIndex; this.localNode = localNode; + this.nodeSettings = nodeSettings; persistedState = persistedStateSupplier.apply(localNode); onNodeLog(localNode, this::setUp).run(); } @@ -1738,7 +1779,8 @@ protected Optional getDisruptableMockTransport(Transpo } }; - final Settings settings = 
Settings.builder() + final Settings settings = nodeSettings.hasValue(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey()) ? + nodeSettings : Settings.builder().put(nodeSettings) .putList(ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.getKey(), ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING.get(Settings.EMPTY)).build(); // suppress auto-bootstrap transportService = mockTransport.createTransportService( @@ -1781,17 +1823,18 @@ void close() { } ClusterNode restartedNode() { - return restartedNode(Function.identity(), Function.identity()); + return restartedNode(Function.identity(), Function.identity(), nodeSettings); } - ClusterNode restartedNode(Function adaptGlobalMetaData, Function adaptCurrentTerm) { + ClusterNode restartedNode(Function adaptGlobalMetaData, Function adaptCurrentTerm, + Settings nodeSettings) { final TransportAddress address = randomBoolean() ? buildNewFakeTransportAddress() : localNode.getAddress(); final DiscoveryNode newLocalNode = new DiscoveryNode(localNode.getName(), localNode.getId(), UUIDs.randomBase64UUID(random()), // generated deterministically for repeatable tests address.address().getHostString(), address.getAddress(), address, Collections.emptyMap(), localNode.isMasterNode() ? EnumSet.allOf(Role.class) : emptySet(), Version.CURRENT); return new ClusterNode(nodeIndex, newLocalNode, - node -> new MockPersistedState(newLocalNode, persistedState, adaptGlobalMetaData, adaptCurrentTerm)); + node -> new MockPersistedState(newLocalNode, persistedState, adaptGlobalMetaData, adaptCurrentTerm), nodeSettings); } private PersistedState getPersistedState() { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java index 571843126f98c..228d05c51c462 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexNameExpressionResolverTests.java @@ -580,7 +580,26 @@ public void testConcreteIndicesIgnoreIndicesEmptyRequest() { assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, new String[]{})), equalTo(newHashSet("kuku", "testXXX"))); } + public void testConcreteIndicesNoIndicesErrorMessage() { + MetaData.Builder mdBuilder = MetaData.builder(); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, + IndicesOptions.fromOptions(false, false, true, true)); + IndexNotFoundException infe = expectThrows(IndexNotFoundException.class, + () -> indexNameExpressionResolver.concreteIndices(context, new String[]{})); + assertThat(infe.getMessage(), is("no such index [null] and no indices exist")); + } + public void testConcreteIndicesNoIndicesErrorMessageNoExpand() { + MetaData.Builder mdBuilder = MetaData.builder(); + ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build(); + IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, + IndicesOptions.fromOptions(false, false, false, false)); + IndexNotFoundException infe = expectThrows(IndexNotFoundException.class, + () -> indexNameExpressionResolver.concreteIndices(context, new String[]{})); + assertThat(infe.getMessage(), is("no such index [_all] and no indices exist")); + } + public void testConcreteIndicesWildcardExpansion() { 
MetaData.Builder mdBuilder = MetaData.builder() .put(indexBuilder("testXXX").state(State.OPEN)) diff --git a/server/src/test/java/org/elasticsearch/common/NumbersTests.java b/server/src/test/java/org/elasticsearch/common/NumbersTests.java index 46378ccc9e9fb..4cab3206b7fd0 100644 --- a/server/src/test/java/org/elasticsearch/common/NumbersTests.java +++ b/server/src/test/java/org/elasticsearch/common/NumbersTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.common; +import com.carrotsearch.randomizedtesting.annotations.Timeout; import org.elasticsearch.test.ESTestCase; import java.math.BigDecimal; @@ -27,19 +28,26 @@ public class NumbersTests extends ESTestCase { + @Timeout(millis = 10000) public void testToLong() { assertEquals(3L, Numbers.toLong("3", false)); assertEquals(3L, Numbers.toLong("3.1", true)); assertEquals(9223372036854775807L, Numbers.toLong("9223372036854775807.00", false)); assertEquals(-9223372036854775808L, Numbers.toLong("-9223372036854775808.00", false)); + assertEquals(9223372036854775807L, Numbers.toLong("9223372036854775807.00", true)); + assertEquals(-9223372036854775808L, Numbers.toLong("-9223372036854775808.00", true)); + assertEquals(9223372036854775807L, Numbers.toLong("9223372036854775807.99", true)); + assertEquals(-9223372036854775808L, Numbers.toLong("-9223372036854775808.99", true)); - IllegalArgumentException e = expectThrows(IllegalArgumentException.class, - () -> Numbers.toLong("9223372036854775808", false)); - assertEquals("Value [9223372036854775808] is out of range for a long", e.getMessage()); + assertEquals("Value [9223372036854775808] is out of range for a long", expectThrows(IllegalArgumentException.class, + () -> Numbers.toLong("9223372036854775808", false)).getMessage()); + assertEquals("Value [-9223372036854775809] is out of range for a long", expectThrows(IllegalArgumentException.class, + () -> Numbers.toLong("-9223372036854775809", false)).getMessage()); - e = expectThrows(IllegalArgumentException.class, - () -> Numbers.toLong("-9223372036854775809", false)); - assertEquals("Value [-9223372036854775809] is out of range for a long", e.getMessage()); + assertEquals("Value [1e99999999] is out of range for a long", expectThrows(IllegalArgumentException.class, + () -> Numbers.toLong("1e99999999", false)).getMessage()); + assertEquals("Value [-1e99999999] is out of range for a long", expectThrows(IllegalArgumentException.class, + () -> Numbers.toLong("-1e99999999", false)).getMessage()); } public void testToLongExact() { diff --git a/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java b/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java index 40822d5a38b84..5798b5f799203 100644 --- a/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java +++ b/server/src/test/java/org/elasticsearch/common/joda/JavaJodaTimeDuellingTests.java @@ -343,6 +343,17 @@ public void testDuellingFormatsValidParsing() { assertSameDate("2012-W1-1", "weekyear_week_day"); } + public void testCompositeParsing(){ + //in all these examples the second pattern will be used + assertSameDate("2014-06-06T12:01:02.123", "yyyy-MM-dd'T'HH:mm:ss||yyyy-MM-dd'T'HH:mm:ss.SSS"); + assertSameDate("2014-06-06T12:01:02.123", "strictDateTimeNoMillis||yyyy-MM-dd'T'HH:mm:ss.SSS"); + assertSameDate("2014-06-06T12:01:02.123", "yyyy-MM-dd'T'HH:mm:ss+HH:MM||yyyy-MM-dd'T'HH:mm:ss.SSS"); + } + + public void testExceptionWhenCompositeParsingFails(){ + assertParseException("2014-06-06T12:01:02.123", 
"yyyy-MM-dd'T'HH:mm:ss||yyyy-MM-dd'T'HH:mm:ss.SS"); + } + public void testDuelingStrictParsing() { assertSameDate("2018W313", "strict_basic_week_date"); assertParseException("18W313", "strict_basic_week_date"); diff --git a/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java index 60e2fa0b5687e..aea0c8c5c25f9 100644 --- a/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/MasterDisruptionIT.java @@ -19,26 +19,19 @@ package org.elasticsearch.discovery; -import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateUpdateTask; import org.elasticsearch.cluster.coordination.NoMasterBlockService; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Priority; -import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.disruption.BlockMasterServiceOnMaster; import org.elasticsearch.test.disruption.IntermittentLongGCDisruption; -import org.elasticsearch.test.disruption.LongGCDisruption; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions; import org.elasticsearch.test.disruption.ServiceDisruptionScheme; @@ -46,21 +39,13 @@ import org.elasticsearch.test.junit.annotations.TestLogging; import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.List; -import java.util.Map; -import java.util.Objects; import java.util.Set; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.nullValue; /** * Tests relating to the loss of the master. @@ -69,107 +54,6 @@ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0) public class MasterDisruptionIT extends AbstractDisruptionTestCase { - /** - * Tests that emulates a frozen elected master node that unfreezes and pushes his cluster state to other nodes - * that already are following another elected master node. These nodes should reject this cluster state and prevent - * them from following the stale master. 
- */ - @TestLogging("_root:DEBUG,org.elasticsearch.cluster.service:TRACE,org.elasticsearch.test.disruption:TRACE") - public void testStaleMasterNotHijackingMajority() throws Exception { - final List nodes = startCluster(3); - - // Save the current master node as old master node, because that node will get frozen - final String oldMasterNode = internalCluster().getMasterName(); - for (String node : nodes) { - ensureStableCluster(3, node); - } - assertMaster(oldMasterNode, nodes); - - // Simulating a painful gc by suspending all threads for a long time on the current elected master node. - SingleNodeDisruption masterNodeDisruption = new LongGCDisruption(random(), oldMasterNode); - - // Save the majority side - final List majoritySide = new ArrayList<>(nodes); - majoritySide.remove(oldMasterNode); - - // Keeps track of the previous and current master when a master node transition took place on each node on the majority side: - final Map>> masters = Collections.synchronizedMap(new HashMap<>()); - for (final String node : majoritySide) { - masters.put(node, new ArrayList<>()); - internalCluster().getInstance(ClusterService.class, node).addListener(event -> { - DiscoveryNode previousMaster = event.previousState().nodes().getMasterNode(); - DiscoveryNode currentMaster = event.state().nodes().getMasterNode(); - if (!Objects.equals(previousMaster, currentMaster)) { - logger.info("node {} received new cluster state: {} \n and had previous cluster state: {}", node, event.state(), - event.previousState()); - String previousMasterNodeName = previousMaster != null ? previousMaster.getName() : null; - String currentMasterNodeName = currentMaster != null ? currentMaster.getName() : null; - masters.get(node).add(new Tuple<>(previousMasterNodeName, currentMasterNodeName)); - } - }); - } - - final CountDownLatch oldMasterNodeSteppedDown = new CountDownLatch(1); - internalCluster().getInstance(ClusterService.class, oldMasterNode).addListener(event -> { - if (event.state().nodes().getMasterNodeId() == null) { - oldMasterNodeSteppedDown.countDown(); - } - }); - - internalCluster().setDisruptionScheme(masterNodeDisruption); - logger.info("freezing node [{}]", oldMasterNode); - masterNodeDisruption.startDisrupting(); - - // Wait for the majority side to get stable - assertDifferentMaster(majoritySide.get(0), oldMasterNode); - assertDifferentMaster(majoritySide.get(1), oldMasterNode); - - // The old master node is frozen, but here we submit a cluster state update task that doesn't get executed, - // but will be queued and once the old master node un-freezes it gets executed. - // The old master node will send this update + the cluster state where he is flagged as master to the other - // nodes that follow the new master. These nodes should ignore this update. 
- internalCluster().getInstance(ClusterService.class, oldMasterNode).submitStateUpdateTask("sneaky-update", new - ClusterStateUpdateTask(Priority.IMMEDIATE) { - @Override - public ClusterState execute(ClusterState currentState) { - return ClusterState.builder(currentState).build(); - } - - @Override - public void onFailure(String source, Exception e) { - logger.warn(() -> new ParameterizedMessage("failure [{}]", source), e); - } - }); - - // Save the new elected master node - final String newMasterNode = internalCluster().getMasterName(majoritySide.get(0)); - logger.info("new detected master node [{}]", newMasterNode); - - // Stop disruption - logger.info("Unfreeze node [{}]", oldMasterNode); - masterNodeDisruption.stopDisrupting(); - - oldMasterNodeSteppedDown.await(30, TimeUnit.SECONDS); - // Make sure that the end state is consistent on all nodes: - assertMaster(newMasterNode, nodes); - - assertThat(masters.size(), equalTo(2)); - for (Map.Entry>> entry : masters.entrySet()) { - String nodeName = entry.getKey(); - List> recordedMasterTransition = entry.getValue(); - assertThat("[" + nodeName + "] Each node should only record two master node transitions", - recordedMasterTransition, hasSize(2)); - assertThat("[" + nodeName + "] First transition's previous master should be [" + oldMasterNode + "]", - recordedMasterTransition.get(0).v1(), equalTo(oldMasterNode)); - assertThat("[" + nodeName + "] First transition's current master should be [null]", - recordedMasterTransition.get(0).v2(), nullValue()); - assertThat("[" + nodeName + "] Second transition's previous master should be [null]", - recordedMasterTransition.get(1).v1(), nullValue()); - assertThat("[" + nodeName + "] Second transition's current master should be [" + newMasterNode + "]", - recordedMasterTransition.get(1).v2(), equalTo(newMasterNode)); - } - } - /** * Test that cluster recovers from a long GC on master that causes other nodes to elect a new one */ diff --git a/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java b/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java index 51fef980e3777..c4655bcf7ce9a 100644 --- a/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/StableMasterDisruptionIT.java @@ -18,28 +18,44 @@ */ package org.elasticsearch.discovery; +import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateUpdateTask; +import org.elasticsearch.cluster.coordination.Coordinator; import org.elasticsearch.cluster.coordination.FollowersChecker; import org.elasticsearch.cluster.coordination.LeaderChecker; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.disruption.LongGCDisruption; import org.elasticsearch.test.disruption.NetworkDisruption; import org.elasticsearch.test.disruption.NetworkDisruption.NetworkDisconnect; import org.elasticsearch.test.disruption.NetworkDisruption.NetworkLinkDisruptionType; import 
org.elasticsearch.test.disruption.NetworkDisruption.NetworkUnresponsive; import org.elasticsearch.test.disruption.NetworkDisruption.TwoPartitions; +import org.elasticsearch.test.disruption.SingleNodeDisruption; import org.elasticsearch.test.junit.annotations.TestLogging; -import org.elasticsearch.test.transport.MockTransportService.TestPlugin; +import org.elasticsearch.test.transport.MockTransportService; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Map; +import java.util.Objects; import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; import static java.util.Collections.singleton; @@ -55,7 +71,7 @@ public class StableMasterDisruptionIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - return Collections.singletonList(TestPlugin.class); + return Collections.singletonList(MockTransportService.TestPlugin.class); } /** @@ -152,4 +168,101 @@ private void testFollowerCheckerAfterMasterReelection(NetworkLinkDisruptionType networkDisruption.stopDisrupting(); ensureStableCluster(3); } + + + /** + * Tests that emulates a frozen elected master node that unfreezes and pushes its cluster state to other nodes that already are + * following another elected master node. These nodes should reject this cluster state and prevent them from following the stale master. + */ + public void testStaleMasterNotHijackingMajority() throws Exception { + final List nodes = internalCluster().startNodes(3, Settings.builder() + .put(LeaderChecker.LEADER_CHECK_TIMEOUT_SETTING.getKey(), "1s") + .put(Coordinator.PUBLISH_TIMEOUT_SETTING.getKey(), "1s") + .build()); + ensureStableCluster(3); + + // Save the current master node as old master node, because that node will get frozen + final String oldMasterNode = internalCluster().getMasterName(); + + // Simulating a painful gc by suspending all threads for a long time on the current elected master node. + SingleNodeDisruption masterNodeDisruption = new LongGCDisruption(random(), oldMasterNode); + + // Save the majority side + final List majoritySide = new ArrayList<>(nodes); + majoritySide.remove(oldMasterNode); + + // Keeps track of the previous and current master when a master node transition took place on each node on the majority side: + final Map>> masters = Collections.synchronizedMap(new HashMap<>()); + for (final String node : majoritySide) { + masters.put(node, new ArrayList<>()); + internalCluster().getInstance(ClusterService.class, node).addListener(event -> { + DiscoveryNode previousMaster = event.previousState().nodes().getMasterNode(); + DiscoveryNode currentMaster = event.state().nodes().getMasterNode(); + if (!Objects.equals(previousMaster, currentMaster)) { + logger.info("--> node {} received new cluster state: {} \n and had previous cluster state: {}", node, event.state(), + event.previousState()); + String previousMasterNodeName = previousMaster != null ? previousMaster.getName() : null; + String currentMasterNodeName = currentMaster != null ? 
currentMaster.getName() : null; + masters.get(node).add(new Tuple<>(previousMasterNodeName, currentMasterNodeName)); + } + }); + } + + final CountDownLatch oldMasterNodeSteppedDown = new CountDownLatch(1); + internalCluster().getInstance(ClusterService.class, oldMasterNode).addListener(event -> { + if (event.state().nodes().getMasterNodeId() == null) { + oldMasterNodeSteppedDown.countDown(); + } + }); + + internalCluster().setDisruptionScheme(masterNodeDisruption); + logger.info("--> freezing node [{}]", oldMasterNode); + masterNodeDisruption.startDisrupting(); + + // Wait for majority side to elect a new master + assertBusy(() -> { + for (final Map.Entry>> entry : masters.entrySet()) { + final List> transitions = entry.getValue(); + assertTrue(entry.getKey() + ": " + transitions, + transitions.stream().anyMatch(transition -> transition.v2() != null)); + } + }); + + // The old master node is frozen, but here we submit a cluster state update task that doesn't get executed, but will be queued and + // once the old master node un-freezes it gets executed. The old master node will send this update + the cluster state where it is + // flagged as master to the other nodes that follow the new master. These nodes should ignore this update. + internalCluster().getInstance(ClusterService.class, oldMasterNode).submitStateUpdateTask("sneaky-update", new + ClusterStateUpdateTask(Priority.IMMEDIATE) { + @Override + public ClusterState execute(ClusterState currentState) { + return ClusterState.builder(currentState).build(); + } + + @Override + public void onFailure(String source, Exception e) { + logger.warn(() -> new ParameterizedMessage("failure [{}]", source), e); + } + }); + + // Save the new elected master node + final String newMasterNode = internalCluster().getMasterName(majoritySide.get(0)); + logger.info("--> new detected master node [{}]", newMasterNode); + + // Stop disruption + logger.info("--> unfreezing node [{}]", oldMasterNode); + masterNodeDisruption.stopDisrupting(); + + oldMasterNodeSteppedDown.await(30, TimeUnit.SECONDS); + logger.info("--> [{}] stepped down as master", oldMasterNode); + ensureStableCluster(3); + + assertThat(masters.size(), equalTo(2)); + for (Map.Entry>> entry : masters.entrySet()) { + String nodeName = entry.getKey(); + List> transitions = entry.getValue(); + assertTrue("[" + nodeName + "] should not apply state from old master [" + oldMasterNode + "] but it did: " + transitions, + transitions.stream().noneMatch(t -> oldMasterNode.equals(t.v2()))); + } + } + } diff --git a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java index e16389a38471f..e0fc4a4d5392c 100644 --- a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java +++ b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryIT.java @@ -19,13 +19,22 @@ package org.elasticsearch.discovery.single; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.core.LogEvent; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.coordination.JoinHelper; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.node.Node; import org.elasticsearch.test.ESIntegTestCase; import 
org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.MockHttpTransport; +import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.NodeConfigurationSource; +import org.elasticsearch.transport.RemoteTransportException; import org.elasticsearch.transport.TransportService; import java.io.IOException; @@ -105,6 +114,78 @@ public Path nodeConfigPath(int nodeOrdinal) { } } + public void testCannotJoinNodeWithSingleNodeDiscovery() throws Exception { + MockLogAppender mockAppender = new MockLogAppender(); + mockAppender.start(); + mockAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "test", + JoinHelper.class.getCanonicalName(), + Level.INFO, + "failed to join") { + + @Override + public boolean innerMatch(final LogEvent event) { + return event.getThrown() != null + && event.getThrown().getClass() == RemoteTransportException.class + && event.getThrown().getCause() != null + && event.getThrown().getCause().getClass() == IllegalStateException.class + && event.getThrown().getCause().getMessage().contains( + "cannot join node with [discovery.type] set to [single-node]"); + } + }); + final TransportService service = internalCluster().getInstance(TransportService.class); + final int port = service.boundAddress().publishAddress().getPort(); + final NodeConfigurationSource configurationSource = new NodeConfigurationSource() { + @Override + public Settings nodeSettings(int nodeOrdinal) { + return Settings + .builder() + .put("discovery.type", "zen") + .put("transport.type", getTestTransportType()) + .put(Node.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "0s") + /* + * We align the port ranges of the two as then with zen discovery these two + * nodes would find each other. + */ + .put("transport.port", port + "-" + (port + 5 - 1)) + .build(); + } + + @Override + public Path nodeConfigPath(int nodeOrdinal) { + return null; + } + }; + try (InternalTestCluster other = + new InternalTestCluster( + randomLong(), + createTempDir(), + false, + false, + 1, + 1, + internalCluster().getClusterName(), + configurationSource, + 0, + "other", + Arrays.asList(getTestTransportPlugin(), MockHttpTransport.TestPlugin.class), + Function.identity())) { + + Logger clusterLogger = LogManager.getLogger(JoinHelper.class); + Loggers.addAppender(clusterLogger, mockAppender); + try { + other.beforeTest(random(), 0); + final ClusterState first = internalCluster().getInstance(ClusterService.class).state(); + assertThat(first.nodes().getSize(), equalTo(1)); + assertBusy(() -> mockAppender.assertAllExpectationsMatched()); + } finally { + Loggers.removeAppender(clusterLogger, mockAppender); + mockAppender.stop(); + } + } + } + public void testStatePersistence() throws Exception { createIndex("test"); internalCluster().fullRestart(); diff --git a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryTests.java b/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryTests.java deleted file mode 100644 index c3dfad2d43792..0000000000000 --- a/server/src/test/java/org/elasticsearch/discovery/single/SingleNodeDiscoveryTests.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.discovery.single; - -import org.elasticsearch.core.internal.io.IOUtils; -import org.elasticsearch.Version; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.service.ClusterApplier; -import org.elasticsearch.cluster.service.MasterService; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.transport.MockTransportService; -import org.elasticsearch.threadpool.TestThreadPool; -import org.elasticsearch.threadpool.ThreadPool; - -import java.io.Closeable; -import java.util.Stack; -import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Supplier; - -import static org.elasticsearch.test.ClusterServiceUtils.createMasterService; -import static org.hamcrest.Matchers.equalTo; - -public class SingleNodeDiscoveryTests extends ESTestCase { - - public void testInitialJoin() throws Exception { - final Settings settings = Settings.EMPTY; - final Version version = Version.CURRENT; - final ThreadPool threadPool = new TestThreadPool(getClass().getName()); - final Stack stack = new Stack<>(); - try { - final MockTransportService transportService = - MockTransportService.createNewService(settings, version, threadPool, null); - stack.push(transportService); - transportService.start(); - final DiscoveryNode node = transportService.getLocalNode(); - final MasterService masterService = createMasterService(threadPool, node); - AtomicReference clusterState = new AtomicReference<>(); - final SingleNodeDiscovery discovery = - new SingleNodeDiscovery(Settings.EMPTY, transportService, - masterService, new ClusterApplier() { - @Override - public void setInitialState(ClusterState initialState) { - clusterState.set(initialState); - } - - @Override - public void onNewClusterState(String source, Supplier clusterStateSupplier, - ClusterApplyListener listener) { - clusterState.set(clusterStateSupplier.get()); - listener.onSuccess(source); - } - }, null); - discovery.start(); - discovery.startInitialJoin(); - final DiscoveryNodes nodes = clusterState.get().nodes(); - assertThat(nodes.getSize(), equalTo(1)); - assertThat(nodes.getMasterNode().getId(), equalTo(node.getId())); - } finally { - while (!stack.isEmpty()) { - IOUtils.closeWhileHandlingException(stack.pop()); - } - terminate(threadPool); - } - } - -} diff --git a/server/src/test/java/org/elasticsearch/get/GetActionIT.java b/server/src/test/java/org/elasticsearch/get/GetActionIT.java index 77303995f7494..8be9a991d17e9 100644 --- a/server/src/test/java/org/elasticsearch/get/GetActionIT.java +++ b/server/src/test/java/org/elasticsearch/get/GetActionIT.java @@ -441,7 +441,7 @@ public void testMultiGetWithVersion() throws Exception { assertThat(response.getResponses()[1].getResponse().getSourceAsMap().get("field").toString(), equalTo("value1")); assertThat(response.getResponses()[2].getFailure(), notNullValue()); assertThat(response.getResponses()[2].getFailure().getId(), equalTo("1")); - 
assertThat(response.getResponses()[2].getFailure().getMessage(), startsWith("[type1][1]: version conflict")); + assertThat(response.getResponses()[2].getFailure().getMessage(), startsWith("[1]: version conflict")); assertThat(response.getResponses()[2].getFailure().getFailure(), instanceOf(VersionConflictEngineException.class)); //Version from Lucene index @@ -464,7 +464,7 @@ public void testMultiGetWithVersion() throws Exception { assertThat(response.getResponses()[1].getResponse().getSourceAsMap().get("field").toString(), equalTo("value1")); assertThat(response.getResponses()[2].getFailure(), notNullValue()); assertThat(response.getResponses()[2].getFailure().getId(), equalTo("1")); - assertThat(response.getResponses()[2].getFailure().getMessage(), startsWith("[type1][1]: version conflict")); + assertThat(response.getResponses()[2].getFailure().getMessage(), startsWith("[1]: version conflict")); assertThat(response.getResponses()[2].getFailure().getFailure(), instanceOf(VersionConflictEngineException.class)); @@ -489,7 +489,7 @@ public void testMultiGetWithVersion() throws Exception { assertThat(response.getResponses()[1].getFailure(), notNullValue()); assertThat(response.getResponses()[1].getFailure().getId(), equalTo("2")); assertThat(response.getResponses()[1].getIndex(), equalTo("test")); - assertThat(response.getResponses()[1].getFailure().getMessage(), startsWith("[type1][2]: version conflict")); + assertThat(response.getResponses()[1].getFailure().getMessage(), startsWith("[2]: version conflict")); assertThat(response.getResponses()[2].getId(), equalTo("2")); assertThat(response.getResponses()[2].getIndex(), equalTo("test")); assertThat(response.getResponses()[2].getFailure(), nullValue()); @@ -515,7 +515,7 @@ public void testMultiGetWithVersion() throws Exception { assertThat(response.getResponses()[1].getFailure(), notNullValue()); assertThat(response.getResponses()[1].getFailure().getId(), equalTo("2")); assertThat(response.getResponses()[1].getIndex(), equalTo("test")); - assertThat(response.getResponses()[1].getFailure().getMessage(), startsWith("[type1][2]: version conflict")); + assertThat(response.getResponses()[1].getFailure().getMessage(), startsWith("[2]: version conflict")); assertThat(response.getResponses()[2].getId(), equalTo("2")); assertThat(response.getResponses()[2].getIndex(), equalTo("test")); assertThat(response.getResponses()[2].getFailure(), nullValue()); diff --git a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java index 5f6659afd7397..351cccdff4aa0 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -89,6 +89,7 @@ import java.util.function.Function; import static java.util.Collections.emptyMap; +import static org.elasticsearch.index.IndexService.IndexCreationContext.CREATE_INDEX; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasToString; import static org.hamcrest.Matchers.instanceOf; @@ -148,8 +149,8 @@ public void tearDown() throws Exception { } private IndexService newIndexService(IndexModule module) throws IOException { - return module.newIndexService(nodeEnvironment, xContentRegistry(), deleter, circuitBreakerService, bigArrays, threadPool, - scriptService, null, indicesQueryCache, mapperRegistry, + return module.newIndexService(CREATE_INDEX, nodeEnvironment, xContentRegistry(), deleter, circuitBreakerService, bigArrays, 
+ threadPool, scriptService, null, indicesQueryCache, mapperRegistry, new IndicesFieldDataCache(settings, listener), writableRegistry()); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java index f5597ecb1f443..2142fca565c9b 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/GeoPointFieldMapperTests.java @@ -523,5 +523,15 @@ public void testInvalidGeopointValuesIgnored() throws Exception { BytesReference.bytes(XContentFactory.jsonBuilder() .startObject().field("location", "NaN,12").endObject() ), XContentType.JSON)).rootDoc().getField("location"), nullValue()); + + assertThat(defaultMapper.parse(new SourceToParse("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() + .startObject().startObject("location").nullField("lat").field("lon", 1).endObject().endObject() + ), XContentType.JSON)).rootDoc().getField("location"), nullValue()); + + assertThat(defaultMapper.parse(new SourceToParse("test", "type", "1", + BytesReference.bytes(XContentFactory.jsonBuilder() + .startObject().startObject("location").nullField("lat").nullField("lon").endObject().endObject() + ), XContentType.JSON)).rootDoc().getField("location"), nullValue()); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java index ba7f5d846840a..b4b9242daa456 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/NumberFieldMapperTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.mapper; +import com.carrotsearch.randomizedtesting.annotations.Timeout; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexableField; import org.elasticsearch.common.Strings; @@ -367,17 +368,20 @@ public void testEmptyName() throws IOException { } } + @Timeout(millis = 30000) public void testOutOfRangeValues() throws IOException { final List> inputs = Arrays.asList( OutOfRangeSpec.of(NumberType.BYTE, "128", "is out of range for a byte"), OutOfRangeSpec.of(NumberType.SHORT, "32768", "is out of range for a short"), OutOfRangeSpec.of(NumberType.INTEGER, "2147483648", "is out of range for an integer"), OutOfRangeSpec.of(NumberType.LONG, "9223372036854775808", "out of range for a long"), + OutOfRangeSpec.of(NumberType.LONG, "1e999999999", "out of range for a long"), OutOfRangeSpec.of(NumberType.BYTE, "-129", "is out of range for a byte"), OutOfRangeSpec.of(NumberType.SHORT, "-32769", "is out of range for a short"), OutOfRangeSpec.of(NumberType.INTEGER, "-2147483649", "is out of range for an integer"), OutOfRangeSpec.of(NumberType.LONG, "-9223372036854775809", "out of range for a long"), + OutOfRangeSpec.of(NumberType.LONG, "-1e999999999", "out of range for a long"), OutOfRangeSpec.of(NumberType.BYTE, 128, "is out of range for a byte"), OutOfRangeSpec.of(NumberType.SHORT, 32768, "out of range of Java short"), @@ -419,6 +423,10 @@ public void testOutOfRangeValues() throws IOException { e.getCause().getMessage(), containsString(item.message)); } } + + // the following two strings are in-range for a long after coercion + parseRequest(NumberType.LONG, createIndexRequest("9223372036854775807.9")); + parseRequest(NumberType.LONG, 
createIndexRequest("-9223372036854775808.9")); } private void parseRequest(NumberType type, BytesReference content) throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/query/MatchBoolPrefixQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MatchBoolPrefixQueryBuilderTests.java new file mode 100644 index 0000000000000..b3a3a2512a5ff --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/query/MatchBoolPrefixQueryBuilderTests.java @@ -0,0 +1,284 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query; + +import org.apache.lucene.analysis.MockSynonymAnalyzer; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.BooleanClause; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.FuzzyQuery; +import org.apache.lucene.search.PrefixQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.SynonymQuery; +import org.apache.lucene.search.TermQuery; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.lucene.search.Queries; +import org.elasticsearch.index.search.MatchQuery; +import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.test.AbstractQueryTestCase; + +import java.io.IOException; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static java.util.Arrays.asList; +import static org.hamcrest.CoreMatchers.anyOf; +import static org.hamcrest.CoreMatchers.everyItem; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.equalToIgnoringCase; +import static org.hamcrest.Matchers.hasProperty; +import static org.hamcrest.Matchers.hasSize; + +public class MatchBoolPrefixQueryBuilderTests extends AbstractQueryTestCase { + + @Override + protected MatchBoolPrefixQueryBuilder doCreateTestQueryBuilder() { + final String fieldName = randomFrom(STRING_FIELD_NAME, STRING_ALIAS_FIELD_NAME); + final Object value = IntStream.rangeClosed(0, randomIntBetween(0, 3)) + .mapToObj(i -> randomAlphaOfLengthBetween(1, 10) + " ") + .collect(Collectors.joining()) + .trim(); + + final MatchBoolPrefixQueryBuilder queryBuilder = new MatchBoolPrefixQueryBuilder(fieldName, value); + + if (randomBoolean() && isTextField(fieldName)) { + queryBuilder.analyzer(randomFrom("simple", "keyword", "whitespace")); + } + + if (randomBoolean()) { + queryBuilder.operator(randomFrom(Operator.values())); + } + + if (randomBoolean()) { + queryBuilder.minimumShouldMatch(randomMinimumShouldMatch()); + } + + if (randomBoolean()) 
{ + queryBuilder.fuzziness(randomFuzziness(fieldName)); + } + + if (randomBoolean()) { + queryBuilder.prefixLength(randomIntBetween(0, 10)); + } + + if (randomBoolean()) { + queryBuilder.maxExpansions(randomIntBetween(1, 1000)); + } + + if (randomBoolean()) { + queryBuilder.fuzzyTranspositions(randomBoolean()); + } + + if (randomBoolean()) { + queryBuilder.fuzzyRewrite(getRandomRewriteMethod()); + } + + return queryBuilder; + } + + @Override + protected void doAssertLuceneQuery(MatchBoolPrefixQueryBuilder queryBuilder, Query query, SearchContext context) throws IOException { + assertThat(query, notNullValue()); + assertThat(query, anyOf(instanceOf(BooleanQuery.class), instanceOf(PrefixQuery.class))); + + if (query instanceof PrefixQuery) { + final PrefixQuery prefixQuery = (PrefixQuery) query; + assertThat(prefixQuery.getPrefix().text(), equalToIgnoringCase((String) queryBuilder.value())); + } else { + assertThat(query, instanceOf(BooleanQuery.class)); + final BooleanQuery booleanQuery = (BooleanQuery) query; + // all queries except the last should be TermQuery or SynonymQuery + final Set allQueriesExceptLast = IntStream.range(0, booleanQuery.clauses().size() - 1) + .mapToObj(booleanQuery.clauses()::get) + .map(BooleanClause::getQuery) + .collect(Collectors.toSet()); + assertThat(allQueriesExceptLast, anyOf( + everyItem(instanceOf(TermQuery.class)), + everyItem(instanceOf(SynonymQuery.class)), + everyItem(instanceOf(FuzzyQuery.class)) + )); + + if (allQueriesExceptLast.stream().anyMatch(subQuery -> subQuery instanceof FuzzyQuery)) { + assertThat(queryBuilder.fuzziness(), notNullValue()); + } + allQueriesExceptLast.stream().filter(subQuery -> subQuery instanceof FuzzyQuery).forEach(subQuery -> { + final FuzzyQuery fuzzyQuery = (FuzzyQuery) subQuery; + assertThat(fuzzyQuery.getPrefixLength(), equalTo(queryBuilder.prefixLength())); + assertThat(fuzzyQuery.getTranspositions(), equalTo(queryBuilder.fuzzyTranspositions())); + }); + + // the last query should be PrefixQuery + final Query shouldBePrefixQuery = booleanQuery.clauses().get(booleanQuery.clauses().size() - 1).getQuery(); + assertThat(shouldBePrefixQuery, instanceOf(PrefixQuery.class)); + + if (queryBuilder.minimumShouldMatch() != null) { + final int optionalClauses = + (int) booleanQuery.clauses().stream().filter(clause -> clause.getOccur() == BooleanClause.Occur.SHOULD).count(); + final int expected = Queries.calculateMinShouldMatch(optionalClauses, queryBuilder.minimumShouldMatch()); + assertThat(booleanQuery.getMinimumNumberShouldMatch(), equalTo(expected)); + } + } + } + + public void testIllegalValues() { + { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new MatchBoolPrefixQueryBuilder(null, "value")); + assertEquals("[match_bool_prefix] requires fieldName", e.getMessage()); + } + + { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new MatchBoolPrefixQueryBuilder("name", null)); + assertEquals("[match_bool_prefix] requires query value", e.getMessage()); + } + + { + final MatchBoolPrefixQueryBuilder builder = new MatchBoolPrefixQueryBuilder("name", "value"); + builder.analyzer("bogusAnalyzer"); + QueryShardException e = expectThrows(QueryShardException.class, () -> builder.toQuery(createShardContext())); + assertThat(e.getMessage(), containsString("analyzer [bogusAnalyzer] not found")); + } + } + + public void testFromSimpleJson() throws IOException { + final String simple = + "{" + + "\"match_bool_prefix\": {" + + "\"fieldName\": \"fieldValue\"" + + "}" + + "}"; 
+ final String expected = + "{" + + "\"match_bool_prefix\": {" + + "\"fieldName\": {" + + "\"query\": \"fieldValue\"," + + "\"operator\": \"OR\"," + + "\"prefix_length\": 0," + + "\"max_expansions\": 50," + + "\"fuzzy_transpositions\": true," + + "\"boost\": 1.0" + + "}" + + "}" + + "}"; + + final MatchBoolPrefixQueryBuilder builder = (MatchBoolPrefixQueryBuilder) parseQuery(simple); + checkGeneratedJson(expected, builder); + } + + public void testFromJson() throws IOException { + final String expected = + "{" + + "\"match_bool_prefix\": {" + + "\"fieldName\": {" + + "\"query\": \"fieldValue\"," + + "\"analyzer\": \"simple\"," + + "\"operator\": \"AND\"," + + "\"minimum_should_match\": \"2\"," + + "\"fuzziness\": \"1\"," + + "\"prefix_length\": 1," + + "\"max_expansions\": 10," + + "\"fuzzy_transpositions\": false," + + "\"fuzzy_rewrite\": \"constant_score\"," + + "\"boost\": 2.0" + + "}" + + "}" + + "}"; + + final MatchBoolPrefixQueryBuilder builder = (MatchBoolPrefixQueryBuilder) parseQuery(expected); + checkGeneratedJson(expected, builder); + } + + public void testParseFailsWithMultipleFields() { + { + final String json = + "{" + + "\"match_bool_prefix\" : {" + + "\"field_name_1\" : {" + + "\"query\" : \"foo\"" + + "}," + + "\"field_name_2\" : {" + + "\"query\" : \"foo\"\n" + + "}" + + "}" + + "}"; + final ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(json)); + assertEquals( + "[match_bool_prefix] query doesn't support multiple fields, found [field_name_1] and [field_name_2]", e.getMessage()); + } + + { + final String simpleJson = + "{" + + "\"match_bool_prefix\" : {" + + "\"field_name_1\" : \"foo\"," + + "\"field_name_2\" : \"foo\"" + + "}" + + "}"; + final ParsingException e = expectThrows(ParsingException.class, () -> parseQuery(simpleJson)); + assertEquals( + "[match_bool_prefix] query doesn't support multiple fields, found [field_name_1] and [field_name_2]", e.getMessage()); + } + } + + public void testAnalysis() throws Exception { + final MatchBoolPrefixQueryBuilder builder = new MatchBoolPrefixQueryBuilder(STRING_FIELD_NAME, "foo bar baz"); + final Query query = builder.toQuery(createShardContext()); + + assertBooleanQuery(query, asList( + new TermQuery(new Term(STRING_FIELD_NAME, "foo")), + new TermQuery(new Term(STRING_FIELD_NAME, "bar")), + new PrefixQuery(new Term(STRING_FIELD_NAME, "baz")) + )); + } + + public void testAnalysisSynonym() throws Exception { + final MatchQuery matchQuery = new MatchQuery(createShardContext()); + matchQuery.setAnalyzer(new MockSynonymAnalyzer()); + final Query query = matchQuery.parse(MatchQuery.Type.BOOLEAN_PREFIX, STRING_FIELD_NAME, "fox dogs red"); + + assertBooleanQuery(query, asList( + new TermQuery(new Term(STRING_FIELD_NAME, "fox")), + new SynonymQuery(new Term(STRING_FIELD_NAME, "dogs"), new Term(STRING_FIELD_NAME, "dog")), + new PrefixQuery(new Term(STRING_FIELD_NAME, "red")) + )); + } + + public void testAnalysisSingleTerm() throws Exception { + final MatchBoolPrefixQueryBuilder builder = new MatchBoolPrefixQueryBuilder(STRING_FIELD_NAME, "foo"); + final Query query = builder.toQuery(createShardContext()); + assertThat(query, equalTo(new PrefixQuery(new Term(STRING_FIELD_NAME, "foo")))); + } + + private static void assertBooleanQuery(Query actual, List expectedClauseQueries) { + assertThat(actual, instanceOf(BooleanQuery.class)); + final BooleanQuery actualBooleanQuery = (BooleanQuery) actual; + assertThat(actualBooleanQuery.clauses(), hasSize(expectedClauseQueries.size())); + 
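// Editorial aside (not part of this patch): a hedged usage sketch of the new match_bool_prefix
// query that these tests exercise. Analysis splits the input into terms; every term except the
// last becomes an optional term/synonym/fuzzy clause and the last term becomes a prefix clause
// (see testAnalysis(), testAnalysisSynonym() and testAnalysisSingleTerm() in this class). The
// field name "title" and the searchSourceBuilder variable below are illustrative assumptions only:
//
//     MatchBoolPrefixQueryBuilder query = new MatchBoolPrefixQueryBuilder("title", "quick brown f");
//     query.operator(Operator.OR);          // "quick" and "brown" become optional SHOULD clauses
//     query.minimumShouldMatch("2");        // require at least two of the generated clauses
//     query.prefixLength(1);                // fuzzy-matching options mirror the setters randomized
//     query.maxExpansions(50);              // in doCreateTestQueryBuilder() above
//     searchSourceBuilder.query(query);     // "f" is rewritten to a PrefixQuery on the same field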
assertThat(actualBooleanQuery.clauses(), everyItem(hasProperty("occur", equalTo(BooleanClause.Occur.SHOULD)))); + + for (int i = 0; i < actualBooleanQuery.clauses().size(); i++) { + final Query clauseQuery = actualBooleanQuery.clauses().get(i).getQuery(); + assertThat(clauseQuery, equalTo(expectedClauseQueries.get(i))); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java index c258cce6c7c50..e9f2b447da133 100644 --- a/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/MatchQueryBuilderTests.java @@ -21,6 +21,7 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.CannedBinaryTokenStream; +import org.apache.lucene.analysis.MockSynonymAnalyzer; import org.apache.lucene.index.Term; import org.apache.lucene.queries.ExtendedCommonTermsQuery; import org.apache.lucene.search.BooleanClause; @@ -28,6 +29,7 @@ import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.PointRangeQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; @@ -394,6 +396,76 @@ public void testLenientPhraseQuery() throws Exception { containsString("field:[string_no_pos] was indexed without position data; cannot run PhraseQuery")); } + public void testAutoGenerateSynonymsPhraseQuery() throws Exception { + final MatchQuery matchQuery = new MatchQuery(createShardContext()); + matchQuery.setAnalyzer(new MockSynonymAnalyzer()); + + { + matchQuery.setAutoGenerateSynonymsPhraseQuery(false); + final Query query = matchQuery.parse(Type.BOOLEAN, STRING_FIELD_NAME, "guinea pig"); + final Query expectedQuery = new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(new TermQuery(new Term(STRING_FIELD_NAME, "guinea")), BooleanClause.Occur.MUST) + .add(new TermQuery(new Term(STRING_FIELD_NAME, "pig")), BooleanClause.Occur.MUST) + .build(), + BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term(STRING_FIELD_NAME, "cavy")), BooleanClause.Occur.SHOULD) + .build(), + BooleanClause.Occur.SHOULD).build(); + assertThat(query, equalTo(expectedQuery)); + } + + { + matchQuery.setAutoGenerateSynonymsPhraseQuery(true); + final Query query = matchQuery.parse(Type.BOOLEAN, STRING_FIELD_NAME, "guinea pig"); + final Query expectedQuery = new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(new PhraseQuery.Builder() + .add(new Term(STRING_FIELD_NAME, "guinea")) + .add(new Term(STRING_FIELD_NAME, "pig")) + .build(), + BooleanClause.Occur.SHOULD) + .add(new TermQuery(new Term(STRING_FIELD_NAME, "cavy")), BooleanClause.Occur.SHOULD) + .build(), + BooleanClause.Occur.SHOULD).build(); + assertThat(query, equalTo(expectedQuery)); + } + + { + matchQuery.setAutoGenerateSynonymsPhraseQuery(false); + final Query query = matchQuery.parse(Type.BOOLEAN_PREFIX, STRING_FIELD_NAME, "guinea pig"); + final Query expectedQuery = new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(new TermQuery(new Term(STRING_FIELD_NAME, "guinea")), BooleanClause.Occur.MUST) + .add(new TermQuery(new Term(STRING_FIELD_NAME, "pig")), BooleanClause.Occur.MUST) + .build(), + BooleanClause.Occur.SHOULD) + .add(new TermQuery(new 
Term(STRING_FIELD_NAME, "cavy")), BooleanClause.Occur.SHOULD) + .build(), + BooleanClause.Occur.SHOULD).build(); + assertThat(query, equalTo(expectedQuery)); + } + + { + matchQuery.setAutoGenerateSynonymsPhraseQuery(true); + final Query query = matchQuery.parse(Type.BOOLEAN_PREFIX, STRING_FIELD_NAME, "guinea pig"); + final MultiPhrasePrefixQuery guineaPig = new MultiPhrasePrefixQuery(STRING_FIELD_NAME); + guineaPig.add(new Term(STRING_FIELD_NAME, "guinea")); + guineaPig.add(new Term(STRING_FIELD_NAME, "pig")); + final MultiPhrasePrefixQuery cavy = new MultiPhrasePrefixQuery(STRING_FIELD_NAME); + cavy.add(new Term(STRING_FIELD_NAME, "cavy")); + final Query expectedQuery = new BooleanQuery.Builder() + .add(new BooleanQuery.Builder() + .add(guineaPig, BooleanClause.Occur.SHOULD) + .add(cavy, BooleanClause.Occur.SHOULD) + .build(), + BooleanClause.Occur.SHOULD).build(); + assertThat(query, equalTo(expectedQuery)); + } + } + public void testMaxBooleanClause() { MatchQuery query = new MatchQuery(createShardContext()); query.setAnalyzer(new MockGraphAnalyzer(createGiantGraph(40))); diff --git a/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java index 36ba370939b17..7ca722fc31139 100644 --- a/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/MultiMatchQueryBuilderTests.java @@ -31,6 +31,7 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.PhraseQuery; import org.apache.lucene.search.PointRangeQuery; +import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -52,10 +53,11 @@ import static org.elasticsearch.index.query.QueryBuilders.multiMatchQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBooleanSubQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertDisjunctionSubQuery; +import static org.hamcrest.CoreMatchers.anyOf; import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.CoreMatchers.either; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.collection.IsCollectionWithSize.hasSize; public class MultiMatchQueryBuilderTests extends AbstractQueryTestCase { @@ -91,10 +93,11 @@ protected MultiMatchQueryBuilder doCreateTestQueryBuilder() { // sets other parameters of the multi match query if (randomBoolean()) { - if (fieldName.equals(STRING_FIELD_NAME)) { + if (fieldName.equals(STRING_FIELD_NAME) || fieldName.equals(STRING_ALIAS_FIELD_NAME) || fieldName.equals(STRING_FIELD_NAME_2)) { query.type(randomFrom(MultiMatchQueryBuilder.Type.values())); } else { - query.type(randomValueOtherThan(MultiMatchQueryBuilder.Type.PHRASE_PREFIX, + query.type(randomValueOtherThanMany( + (type) -> type == Type.PHRASE_PREFIX || type == Type.BOOL_PREFIX, () -> randomFrom(MultiMatchQueryBuilder.Type.values()))); } } @@ -104,7 +107,7 @@ protected MultiMatchQueryBuilder doCreateTestQueryBuilder() { if (randomBoolean() && fieldName.equals(STRING_FIELD_NAME)) { query.analyzer(randomAnalyzer()); } - if (randomBoolean()) { + if (randomBoolean() && query.type() != Type.BOOL_PREFIX) { query.slop(randomIntBetween(0, 5)); } if (fieldName.equals(STRING_FIELD_NAME) && randomBoolean() && @@ -126,7 
+129,7 @@ protected MultiMatchQueryBuilder doCreateTestQueryBuilder() { if (randomBoolean()) { query.tieBreaker(randomFloat()); } - if (randomBoolean()) { + if (randomBoolean() && query.type() != Type.BOOL_PREFIX) { query.cutoffFrequency((float) 10 / randomIntBetween(1, 100)); } if (randomBoolean()) { @@ -158,12 +161,21 @@ protected Map getAlternateVersions() { @Override protected void doAssertLuceneQuery(MultiMatchQueryBuilder queryBuilder, Query query, SearchContext context) throws IOException { // we rely on integration tests for deeper checks here - assertThat(query, either(instanceOf(BoostQuery.class)).or(instanceOf(TermQuery.class)) - .or(instanceOf(BooleanQuery.class)).or(instanceOf(DisjunctionMaxQuery.class)) - .or(instanceOf(FuzzyQuery.class)).or(instanceOf(MultiPhrasePrefixQuery.class)) - .or(instanceOf(MatchAllDocsQuery.class)).or(instanceOf(ExtendedCommonTermsQuery.class)) - .or(instanceOf(MatchNoDocsQuery.class)).or(instanceOf(PhraseQuery.class)) - .or(instanceOf(PointRangeQuery.class)).or(instanceOf(IndexOrDocValuesQuery.class))); + assertThat(query, anyOf(Arrays.asList( + instanceOf(BoostQuery.class), + instanceOf(TermQuery.class), + instanceOf(BooleanQuery.class), + instanceOf(DisjunctionMaxQuery.class), + instanceOf(FuzzyQuery.class), + instanceOf(MultiPhrasePrefixQuery.class), + instanceOf(MatchAllDocsQuery.class), + instanceOf(ExtendedCommonTermsQuery.class), + instanceOf(MatchNoDocsQuery.class), + instanceOf(PhraseQuery.class), + instanceOf(PointRangeQuery.class), + instanceOf(IndexOrDocValuesQuery.class), + instanceOf(PrefixQuery.class) + ))); } public void testIllegaArguments() { @@ -240,6 +252,51 @@ public void testToQueryFieldMissing() throws Exception { instanceOf(MatchNoDocsQuery.class)); } + public void testToQueryBooleanPrefixSingleField() throws IOException { + final MultiMatchQueryBuilder builder = new MultiMatchQueryBuilder("foo bar", STRING_FIELD_NAME); + builder.type(Type.BOOL_PREFIX); + final Query query = builder.toQuery(createShardContext()); + assertThat(query, instanceOf(BooleanQuery.class)); + final BooleanQuery booleanQuery = (BooleanQuery) query; + assertThat(booleanQuery.clauses(), hasSize(2)); + assertThat(assertBooleanSubQuery(booleanQuery, TermQuery.class, 0).getTerm(), equalTo(new Term(STRING_FIELD_NAME, "foo"))); + assertThat(assertBooleanSubQuery(booleanQuery, PrefixQuery.class, 1).getPrefix(), equalTo(new Term(STRING_FIELD_NAME, "bar"))); + } + + public void testToQueryBooleanPrefixMultipleFields() throws IOException { + { + final MultiMatchQueryBuilder builder = new MultiMatchQueryBuilder("foo bar", STRING_FIELD_NAME, STRING_ALIAS_FIELD_NAME); + builder.type(Type.BOOL_PREFIX); + final Query query = builder.toQuery(createShardContext()); + assertThat(query, instanceOf(DisjunctionMaxQuery.class)); + final DisjunctionMaxQuery disMaxQuery = (DisjunctionMaxQuery) query; + assertThat(disMaxQuery.getDisjuncts(), hasSize(2)); + for (Query disjunctQuery : disMaxQuery.getDisjuncts()) { + assertThat(disjunctQuery, instanceOf(BooleanQuery.class)); + final BooleanQuery booleanQuery = (BooleanQuery) disjunctQuery; + assertThat(booleanQuery.clauses(), hasSize(2)); + assertThat(assertBooleanSubQuery(booleanQuery, TermQuery.class, 0).getTerm(), equalTo(new Term(STRING_FIELD_NAME, "foo"))); + assertThat(assertBooleanSubQuery(booleanQuery, PrefixQuery.class, 1).getPrefix(), + equalTo(new Term(STRING_FIELD_NAME, "bar"))); + } + } + + { + // STRING_FIELD_NAME_2 is a keyword field + final MultiMatchQueryBuilder queryBuilder = new MultiMatchQueryBuilder("foo 
bar", STRING_FIELD_NAME, STRING_FIELD_NAME_2); + queryBuilder.type(Type.BOOL_PREFIX); + final Query query = queryBuilder.toQuery(createShardContext()); + assertThat(query, instanceOf(DisjunctionMaxQuery.class)); + final DisjunctionMaxQuery disMaxQuery = (DisjunctionMaxQuery) query; + assertThat(disMaxQuery.getDisjuncts(), hasSize(2)); + final BooleanQuery firstDisjunct = assertDisjunctionSubQuery(disMaxQuery, BooleanQuery.class, 0); + assertThat(firstDisjunct.clauses(), hasSize(2)); + assertThat(assertBooleanSubQuery(firstDisjunct, TermQuery.class, 0).getTerm(), equalTo(new Term(STRING_FIELD_NAME, "foo"))); + final PrefixQuery secondDisjunct = assertDisjunctionSubQuery(disMaxQuery, PrefixQuery.class, 1); + assertThat(secondDisjunct.getPrefix(), equalTo(new Term(STRING_FIELD_NAME_2, "foo bar"))); + } + } + public void testFromJson() throws IOException { String json = "{\n" + diff --git a/server/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java b/server/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java index ee916dd4c47dd..1a85e29f02090 100644 --- a/server/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/geo/GeoUtilsTests.java @@ -397,6 +397,8 @@ public void testParseGeoPoint() throws IOException { parser.nextToken(); GeoPoint point = GeoUtils.parseGeoPoint(parser); assertThat(point, equalTo(new GeoPoint(lat, lon))); + assertThat(parser.currentToken(), is(Token.END_OBJECT)); + assertNull(parser.nextToken()); } json = jsonBuilder().startObject().field("lat", String.valueOf(lat)).field("lon", String.valueOf(lon)).endObject(); try (XContentParser parser = createParser(json)) { @@ -438,6 +440,21 @@ public void testParseGeoPointStringZValueError() throws IOException { } } + public void testParseGeoPointArrayZValueError() throws IOException { + double lat = randomDouble() * 180 - 90 + randomIntBetween(-1000, 1000) * 180; + double lon = randomDouble() * 360 - 180 + randomIntBetween(-1000, 1000) * 360; + double alt = randomDouble() * 1000; + XContentBuilder json = jsonBuilder().startArray().value(lat).value(lon).value(alt).endArray(); + try (XContentParser parser = createParser(json)) { + parser.nextToken(); + Exception e = expectThrows(ElasticsearchParseException.class, + () -> GeoUtils.parseGeoPoint(parser, new GeoPoint(), false)); + assertThat(e.getMessage(), containsString("but [ignore_z_value] parameter is [false]")); + assertThat(parser.currentToken(), is(Token.END_ARRAY)); + assertNull(parser.nextToken()); + } + } + public void testParseGeoPointGeohash() throws IOException { for (int i = 0; i < 100; i++) { int geoHashLength = randomIntBetween(1, GeoHashUtils.PRECISION); @@ -451,6 +468,8 @@ public void testParseGeoPointGeohash() throws IOException { GeoPoint point = GeoUtils.parseGeoPoint(parser); assertThat(point.lat(), allOf(lessThanOrEqualTo(90.0), greaterThanOrEqualTo(-90.0))); assertThat(point.lon(), allOf(lessThanOrEqualTo(180.0), greaterThanOrEqualTo(-180.0))); + assertThat(parser.currentToken(), is(Token.END_OBJECT)); + assertNull(parser.nextToken()); } json = jsonBuilder().startObject().field("geohash", geohashBuilder.toString()).endObject(); try (XContentParser parser = createParser(json)) { @@ -470,6 +489,8 @@ public void testParseGeoPointGeohashWrongType() throws IOException { parser.nextToken(); Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); assertThat(e.getMessage(), containsString("geohash must be a string")); + 
assertThat(parser.currentToken(), is(Token.END_OBJECT)); + assertNull(parser.nextToken()); } } @@ -480,6 +501,8 @@ public void testParseGeoPointLatNoLon() throws IOException { parser.nextToken(); Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); assertThat(e.getMessage(), is("field [lon] missing")); + assertThat(parser.currentToken(), is(Token.END_OBJECT)); + assertNull(parser.nextToken()); } } @@ -490,6 +513,8 @@ public void testParseGeoPointLonNoLat() throws IOException { parser.nextToken(); Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); assertThat(e.getMessage(), is("field [lat] missing")); + assertThat(parser.currentToken(), is(Token.END_OBJECT)); + assertNull(parser.nextToken()); } } @@ -500,6 +525,8 @@ public void testParseGeoPointLonWrongType() throws IOException { parser.nextToken(); Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); assertThat(e.getMessage(), is("longitude must be a number")); + assertThat(parser.currentToken(), is(Token.END_OBJECT)); + assertNull(parser.nextToken()); } } @@ -510,6 +537,8 @@ public void testParseGeoPointLatWrongType() throws IOException { parser.nextToken(); Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); assertThat(e.getMessage(), is("latitude must be a number")); + assertThat(parser.currentToken(), is(Token.END_OBJECT)); + assertNull(parser.nextToken()); } } @@ -578,6 +607,9 @@ public void testParseGeoPointArrayWrongType() throws IOException { } Exception e = expectThrows(ElasticsearchParseException.class, () -> GeoUtils.parseGeoPoint(parser)); assertThat(e.getMessage(), is("numeric value expected")); + assertThat(parser.currentToken(), is(Token.END_ARRAY)); + assertThat(parser.nextToken(), is(Token.END_OBJECT)); + assertNull(parser.nextToken()); } } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java index 78d4428911537..8e2403aedc26c 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java @@ -82,6 +82,8 @@ import org.elasticsearch.test.IndexSettingsModule; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.threadpool.ThreadPoolStats; import java.io.IOException; import java.io.UncheckedIOException; @@ -738,8 +740,7 @@ public void onFailure(Exception e) { t.join(); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/39565") - public void testPendingRefreshWithIntervalChange() throws InterruptedException { + public void testPendingRefreshWithIntervalChange() throws Exception { Settings.Builder builder = Settings.builder(); builder.put(IndexSettings.INDEX_SEARCH_IDLE_AFTER.getKey(), TimeValue.ZERO); IndexService indexService = createIndex("test", builder.build()); @@ -767,7 +768,14 @@ public void testPendingRefreshWithIntervalChange() throws InterruptedException { // wait for both to ensure we don't have in-flight operations updateSettingsLatch.await(); refreshLatch.await(); - + // ensure no scheduled refresh to compete with the scheduleRefresh we are going to verify. 
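// (Added context, hedged: a refresh task triggered by the interval change above may still be
// queued or running on the REFRESH thread pool; if it performed the refresh concurrently, the
// explicit scheduledRefresh() call verified below could find nothing left to do and return false.
// The assertBusy that follows waits until the pool reports no active or queued tasks.)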
+ assertBusy(() -> { + for (ThreadPoolStats.Stats stat : indexService.getThreadPool().stats()) { + if (stat.getName().equals(ThreadPool.Names.REFRESH) && (stat.getQueue() > 0 || stat.getActive() > 0)) { + throw new AssertionError(); // cause assert busy to retry + } + } + }); client().prepareIndex("test", "test", "2").setSource("{\"foo\" : \"bar\"}", XContentType.JSON).get(); assertTrue(shard.scheduledRefresh()); assertTrue(shard.isSearchIdle()); diff --git a/server/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java b/server/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java index 25f04532ac8ce..7bfa50ff2b724 100644 --- a/server/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java +++ b/server/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.Table; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentElasticsearchExtension; import org.elasticsearch.index.Index; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.recovery.RecoveryState; @@ -37,7 +38,9 @@ import org.elasticsearch.usage.UsageService; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; +import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Locale; @@ -53,7 +56,7 @@ public void testRestRecoveryAction() { final Settings settings = Settings.EMPTY; UsageService usageService = new UsageService(); final RestController restController = new RestController(Collections.emptySet(), null, null, null, usageService); - final RestRecoveryAction action = new RestRecoveryAction(settings, restController); + final RestCatRecoveryAction action = new RestCatRecoveryAction(settings, restController); final int totalShards = randomIntBetween(1, 32); final int successfulShards = Math.max(0, totalShards - randomIntBetween(1, 2)); final int failedShards = totalShards - successfulShards; @@ -64,7 +67,11 @@ public void testRestRecoveryAction() { final RecoveryState state = mock(RecoveryState.class); when(state.getShardId()).thenReturn(new ShardId(new Index("index", "_na_"), i)); final RecoveryState.Timer timer = mock(RecoveryState.Timer.class); - when(timer.time()).thenReturn((long)randomIntBetween(1000000, 10 * 1000000)); + final long startTime = randomLongBetween(0, new Date().getTime()); + when(timer.startTime()).thenReturn(startTime); + final long time = randomLongBetween(1000000, 10 * 1000000); + when(timer.time()).thenReturn(time); + when(timer.stopTime()).thenReturn(startTime + time); when(state.getTimer()).thenReturn(timer); when(state.getRecoverySource()).thenReturn(TestShardRouting.randomRecoverySource()); when(state.getStage()).thenReturn(randomFrom(RecoveryState.Stage.values())); @@ -122,63 +129,78 @@ public void testRestRecoveryAction() { List headers = table.getHeaders(); - assertThat(headers.get(0).value, equalTo("index")); - assertThat(headers.get(1).value, equalTo("shard")); - assertThat(headers.get(2).value, equalTo("time")); - assertThat(headers.get(3).value, equalTo("type")); - assertThat(headers.get(4).value, equalTo("stage")); - assertThat(headers.get(5).value, equalTo("source_host")); - assertThat(headers.get(6).value, equalTo("source_node")); - assertThat(headers.get(7).value, equalTo("target_host")); - assertThat(headers.get(8).value, equalTo("target_node")); - assertThat(headers.get(9).value, 
equalTo("repository")); - assertThat(headers.get(10).value, equalTo("snapshot")); - assertThat(headers.get(11).value, equalTo("files")); - assertThat(headers.get(12).value, equalTo("files_recovered")); - assertThat(headers.get(13).value, equalTo("files_percent")); - assertThat(headers.get(14).value, equalTo("files_total")); - assertThat(headers.get(15).value, equalTo("bytes")); - assertThat(headers.get(16).value, equalTo("bytes_recovered")); - assertThat(headers.get(17).value, equalTo("bytes_percent")); - assertThat(headers.get(18).value, equalTo("bytes_total")); - assertThat(headers.get(19).value, equalTo("translog_ops")); - assertThat(headers.get(20).value, equalTo("translog_ops_recovered")); - assertThat(headers.get(21).value, equalTo("translog_ops_percent")); + final List expectedHeaders = Arrays.asList( + "index", + "shard", + "start_time", + "start_time_millis", + "stop_time", + "stop_time_millis", + "time", + "type", + "stage", + "source_host", + "source_node", + "target_host", + "target_node", + "repository", + "snapshot", + "files", + "files_recovered", + "files_percent", + "files_total", + "bytes", + "bytes_recovered", + "bytes_percent", + "bytes_total", + "translog_ops", + "translog_ops_recovered", + "translog_ops_percent"); + + for (int i = 0; i < expectedHeaders.size(); i++) { + assertThat(headers.get(i).value, equalTo(expectedHeaders.get(i))); + } assertThat(table.getRows().size(), equalTo(successfulShards)); + for (int i = 0; i < successfulShards; i++) { final RecoveryState state = recoveryStates.get(i); - List cells = table.getRows().get(i); - assertThat(cells.get(0).value, equalTo("index")); - assertThat(cells.get(1).value, equalTo(i)); - assertThat(cells.get(2).value, equalTo(new TimeValue(state.getTimer().time()))); - assertThat(cells.get(3).value, equalTo(state.getRecoverySource().getType().name().toLowerCase(Locale.ROOT))); - assertThat(cells.get(4).value, equalTo(state.getStage().name().toLowerCase(Locale.ROOT))); - assertThat(cells.get(5).value, equalTo(state.getSourceNode() == null ? "n/a" : state.getSourceNode().getHostName())); - assertThat(cells.get(6).value, equalTo(state.getSourceNode() == null ? "n/a" : state.getSourceNode().getName())); - assertThat(cells.get(7).value, equalTo(state.getTargetNode().getHostName())); - assertThat(cells.get(8).value, equalTo(state.getTargetNode().getName())); - assertThat( - cells.get(9).value, - equalTo(state.getRecoverySource() == null || state.getRecoverySource().getType() != RecoverySource.Type.SNAPSHOT ? - "n/a" : - ((SnapshotRecoverySource) state.getRecoverySource()).snapshot().getRepository())); - assertThat( - cells.get(10).value, - equalTo(state.getRecoverySource() == null || state.getRecoverySource().getType() != RecoverySource.Type.SNAPSHOT ? 
- "n/a" : - ((SnapshotRecoverySource) state.getRecoverySource()).snapshot().getSnapshotId().getName())); - assertThat(cells.get(11).value, equalTo(state.getIndex().totalRecoverFiles())); - assertThat(cells.get(12).value, equalTo(state.getIndex().recoveredFileCount())); - assertThat(cells.get(13).value, equalTo(percent(state.getIndex().recoveredFilesPercent()))); - assertThat(cells.get(14).value, equalTo(state.getIndex().totalFileCount())); - assertThat(cells.get(15).value, equalTo(state.getIndex().totalRecoverBytes())); - assertThat(cells.get(16).value, equalTo(state.getIndex().recoveredBytes())); - assertThat(cells.get(17).value, equalTo(percent(state.getIndex().recoveredBytesPercent()))); - assertThat(cells.get(18).value, equalTo(state.getIndex().totalBytes())); - assertThat(cells.get(19).value, equalTo(state.getTranslog().totalOperations())); - assertThat(cells.get(20).value, equalTo(state.getTranslog().recoveredOperations())); - assertThat(cells.get(21).value, equalTo(percent(state.getTranslog().recoveredPercent()))); + final List expectedValues = Arrays.asList( + "index", + i, + XContentElasticsearchExtension.DEFAULT_DATE_PRINTER.print(state.getTimer().startTime()), + state.getTimer().startTime(), + XContentElasticsearchExtension.DEFAULT_DATE_PRINTER.print(state.getTimer().stopTime()), + state.getTimer().stopTime(), + new TimeValue(state.getTimer().time()), + state.getRecoverySource().getType().name().toLowerCase(Locale.ROOT), + state.getStage().name().toLowerCase(Locale.ROOT), + state.getSourceNode() == null ? "n/a" : state.getSourceNode().getHostName(), + state.getSourceNode() == null ? "n/a" : state.getSourceNode().getName(), + state.getTargetNode().getHostName(), + state.getTargetNode().getName(), + state.getRecoverySource() == null || state.getRecoverySource().getType() != RecoverySource.Type.SNAPSHOT ? + "n/a" : + ((SnapshotRecoverySource) state.getRecoverySource()).snapshot().getRepository(), + state.getRecoverySource() == null || state.getRecoverySource().getType() != RecoverySource.Type.SNAPSHOT ? 
+ "n/a" : + ((SnapshotRecoverySource) state.getRecoverySource()).snapshot().getSnapshotId().getName(), + state.getIndex().totalRecoverFiles(), + state.getIndex().recoveredFileCount(), + percent(state.getIndex().recoveredFilesPercent()), + state.getIndex().totalFileCount(), + state.getIndex().totalRecoverBytes(), + state.getIndex().recoveredBytes(), + percent(state.getIndex().recoveredBytesPercent()), + state.getIndex().totalBytes(), + state.getTranslog().totalOperations(), + state.getTranslog().recoveredOperations(), + percent(state.getTranslog().recoveredPercent())); + + final List cells = table.getRows().get(i); + for (int j = 0; j < expectedValues.size(); j++) { + assertThat(cells.get(j).value, equalTo(expectedValues.get(j))); + } } } diff --git a/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java b/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java index c8ab7cc19dcfd..b7755dd321416 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java @@ -321,6 +321,7 @@ public List> getRescorers() { "intervals", "match", "match_all", + "match_bool_prefix", "match_none", "match_phrase", "match_phrase_prefix", diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/InternalAggregationsTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/InternalAggregationsTests.java index 81626459db4f2..aa244ff7a320b 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/InternalAggregationsTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/InternalAggregationsTests.java @@ -50,18 +50,19 @@ public class InternalAggregationsTests extends ESTestCase { public void testReduceEmptyAggs() { List aggs = Collections.emptyList(); InternalAggregation.ReduceContext reduceContext = new InternalAggregation.ReduceContext(null, null, randomBoolean()); - assertNull(InternalAggregations.reduce(aggs, Collections.emptyList(), reduceContext)); + assertNull(InternalAggregations.reduce(aggs, reduceContext)); } public void testNonFinalReduceTopLevelPipelineAggs() { InternalAggregation terms = new StringTerms("name", BucketOrder.key(true), 10, 1, Collections.emptyList(), Collections.emptyMap(), DocValueFormat.RAW, 25, false, 10, Collections.emptyList(), 0); - List aggs = Collections.singletonList(new InternalAggregations(Collections.singletonList(terms))); List topLevelPipelineAggs = new ArrayList<>(); MaxBucketPipelineAggregationBuilder maxBucketPipelineAggregationBuilder = new MaxBucketPipelineAggregationBuilder("test", "test"); topLevelPipelineAggs.add((SiblingPipelineAggregator)maxBucketPipelineAggregationBuilder.create()); + List aggs = Collections.singletonList(new InternalAggregations(Collections.singletonList(terms), + topLevelPipelineAggs)); InternalAggregation.ReduceContext reduceContext = new InternalAggregation.ReduceContext(null, null, false); - InternalAggregations reducedAggs = InternalAggregations.reduce(aggs, topLevelPipelineAggs, reduceContext); + InternalAggregations reducedAggs = InternalAggregations.reduce(aggs, reduceContext); assertEquals(1, reducedAggs.getTopLevelPipelineAggregators().size()); assertEquals(1, reducedAggs.aggregations.size()); } @@ -79,15 +80,15 @@ public void testFinalReduceTopLevelPipelineAggs() { Collections.singletonList(siblingPipelineAggregator)); reducedAggs = InternalAggregations.reduce(Collections.singletonList(aggs), reduceContext); } else { - InternalAggregations aggs = new 
InternalAggregations(Collections.singletonList(terms)); - List topLevelPipelineAggs = Collections.singletonList(siblingPipelineAggregator); - reducedAggs = InternalAggregations.reduce(Collections.singletonList(aggs), topLevelPipelineAggs, reduceContext); + InternalAggregations aggs = new InternalAggregations(Collections.singletonList(terms), + Collections.singletonList(siblingPipelineAggregator)); + reducedAggs = InternalAggregations.reduce(Collections.singletonList(aggs), reduceContext); } assertEquals(0, reducedAggs.getTopLevelPipelineAggregators().size()); assertEquals(2, reducedAggs.aggregations.size()); } - public void testSerialization() throws Exception { + public static InternalAggregations createTestInstance() throws Exception { List aggsList = new ArrayList<>(); if (randomBoolean()) { StringTermsTests stringTermsTests = new StringTermsTests(); @@ -116,7 +117,11 @@ public void testSerialization() throws Exception { topLevelPipelineAggs.add((SiblingPipelineAggregator)new SumBucketPipelineAggregationBuilder("name3", "bucket3").create()); } } - InternalAggregations aggregations = new InternalAggregations(aggsList, topLevelPipelineAggs); + return new InternalAggregations(aggsList, topLevelPipelineAggs); + } + + public void testSerialization() throws Exception { + InternalAggregations aggregations = createTestInstance(); writeToAndReadFrom(aggregations, 0); } diff --git a/server/src/test/java/org/elasticsearch/search/query/QuerySearchResultTests.java b/server/src/test/java/org/elasticsearch/search/query/QuerySearchResultTests.java new file mode 100644 index 0000000000000..64712b3e417a0 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/query/QuerySearchResultTests.java @@ -0,0 +1,102 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.query; + +import org.apache.lucene.search.ScoreDoc; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TotalHits; +import org.elasticsearch.Version; +import org.elasticsearch.action.OriginalIndices; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.lucene.search.TopDocsAndMaxScore; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.InternalAggregationsTests; +import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator; +import org.elasticsearch.search.suggest.SuggestTests; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; + +import java.util.List; + +import static java.util.Collections.emptyList; + +public class QuerySearchResultTests extends ESTestCase { + + private final NamedWriteableRegistry namedWriteableRegistry; + + public QuerySearchResultTests() { + SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList()); + this.namedWriteableRegistry = new NamedWriteableRegistry(searchModule.getNamedWriteables()); + } + + private static QuerySearchResult createTestInstance() throws Exception { + ShardId shardId = new ShardId("index", "uuid", randomInt()); + QuerySearchResult result = new QuerySearchResult(randomLong(), new SearchShardTarget("node", shardId, null, OriginalIndices.NONE)); + if (randomBoolean()) { + result.terminatedEarly(randomBoolean()); + } + TopDocs topDocs = new TopDocs(new TotalHits(randomLongBetween(0, Long.MAX_VALUE), TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]); + result.topDocs(new TopDocsAndMaxScore(topDocs, randomBoolean() ? 
Float.NaN : randomFloat()), new DocValueFormat[0]); + result.size(randomInt()); + result.from(randomInt()); + if (randomBoolean()) { + result.suggest(SuggestTests.createTestItem()); + } + if (randomBoolean()) { + result.aggregations(InternalAggregationsTests.createTestInstance()); + } + return result; + } + + public void testSerialization() throws Exception { + QuerySearchResult querySearchResult = createTestInstance(); + Version version = VersionUtils.randomVersion(random()); + QuerySearchResult deserialized = copyStreamable(querySearchResult, namedWriteableRegistry, QuerySearchResult::new, version); + assertEquals(querySearchResult.getRequestId(), deserialized.getRequestId()); + assertNull(deserialized.getSearchShardTarget()); + assertEquals(querySearchResult.topDocs().maxScore, deserialized.topDocs().maxScore, 0f); + assertEquals(querySearchResult.topDocs().topDocs.totalHits, deserialized.topDocs().topDocs.totalHits); + assertEquals(querySearchResult.from(), deserialized.from()); + assertEquals(querySearchResult.size(), deserialized.size()); + assertEquals(querySearchResult.hasAggs(), deserialized.hasAggs()); + if (deserialized.hasAggs()) { + Aggregations aggs = querySearchResult.consumeAggs(); + Aggregations deserializedAggs = deserialized.consumeAggs(); + assertEquals(aggs.asList(), deserializedAggs.asList()); + List pipelineAggs = ((InternalAggregations) aggs).getTopLevelPipelineAggregators(); + List deserializedPipelineAggs = + ((InternalAggregations) deserializedAggs).getTopLevelPipelineAggregators(); + assertEquals(pipelineAggs.size(), deserializedPipelineAggs.size()); + for (int i = 0; i < pipelineAggs.size(); i++) { + SiblingPipelineAggregator pipelineAgg = pipelineAggs.get(i); + SiblingPipelineAggregator deserializedPipelineAgg = deserializedPipelineAggs.get(i); + assertArrayEquals(pipelineAgg.bucketsPaths(), deserializedPipelineAgg.bucketsPaths()); + assertEquals(pipelineAgg.name(), deserializedPipelineAgg.name()); + } + } + assertEquals(querySearchResult.terminatedEarly(), deserialized.terminatedEarly()); + } +} diff --git a/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java b/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java index d9f25a369d613..54d9a015b4e4a 100644 --- a/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java +++ b/server/src/test/java/org/elasticsearch/validate/SimpleValidateQueryIT.java @@ -160,7 +160,7 @@ public void testValidateEmptyCluster() { client().admin().indices().prepareValidateQuery().get(); fail("Expected IndexNotFoundException"); } catch (IndexNotFoundException e) { - assertThat(e.getMessage(), is("no such index [null]")); + assertThat(e.getMessage(), is("no such index [null] and no indices exist")); } } diff --git a/test/fixtures/hdfs-fixture/Dockerfile b/test/fixtures/hdfs-fixture/Dockerfile new file mode 100644 index 0000000000000..b9d0e60b7d6d4 --- /dev/null +++ b/test/fixtures/hdfs-fixture/Dockerfile @@ -0,0 +1,8 @@ +FROM java:8-jre + +RUN apt-get update && apt-get install net-tools + +EXPOSE 9998 +EXPOSE 9999 + +CMD java -cp "/fixture:/fixture/*" hdfs.MiniHDFS /data \ No newline at end of file diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 3f08ca7970ca7..f2aebda46b875 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -18,25 +18,23 @@ */ apply plugin: 'elasticsearch.build' +apply plugin: 'elasticsearch.test.fixtures' -versions << [ - 'hadoop2': '2.8.1' -] - -// we 
create MiniHdfsCluster with the hadoop artifact dependencies { - compile "org.apache.hadoop:hadoop-minicluster:${versions.hadoop2}" + compile "org.apache.hadoop:hadoop-minicluster:2.8.1" +} + +task syncClasses(type: Sync) { + from sourceSets.test.runtimeClasspath + into "${buildDir}/fixture" } -// for testing, until fixtures are actually debuggable. -// gradle hides *EVERYTHING* so you have no clue what went wrong. -task hdfs(type: JavaExec) { - classpath = sourceSets.test.compileClasspath + sourceSets.test.output - main = "hdfs.MiniHDFS" - args = [ 'build/fixtures/hdfsFixture' ] +preProcessFixture { + dependsOn syncClasses + + doLast { + file("${buildDir}/shared").mkdirs() + } } -// just a test fixture: we aren't using jars in releases -thirdPartyAudit.enabled = false -// TODO: add a simple HDFS client test for this fixture unitTest.enabled = false diff --git a/test/fixtures/hdfs-fixture/docker-compose.yml b/test/fixtures/hdfs-fixture/docker-compose.yml index e69de29bb2d1d..5bdc40b1f7246 100644 --- a/test/fixtures/hdfs-fixture/docker-compose.yml +++ b/test/fixtures/hdfs-fixture/docker-compose.yml @@ -0,0 +1,11 @@ +version: '3' +services: + hdfs: + hostname: hdfs.build.elastic.co + build: + context: . + dockerfile: Dockerfile + volumes: + - ./build/fixture:/fixture + ports: + - "9999:9999" diff --git a/test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java b/test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java index ce7401fe25cae..01315cdab01ca 100644 --- a/test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java +++ b/test/fixtures/hdfs-fixture/src/main/java/hdfs/MiniHDFS.java @@ -98,7 +98,6 @@ public static void main(String[] args) throws Exception { UserGroupInformation.setConfiguration(cfg); - // TODO: remove hardcoded port! MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(cfg); if (secure) { builder.nameNodePort(9998); diff --git a/test/fixtures/krb5kdc-fixture/Dockerfile b/test/fixtures/krb5kdc-fixture/Dockerfile new file mode 100644 index 0000000000000..50de6334b9c78 --- /dev/null +++ b/test/fixtures/krb5kdc-fixture/Dockerfile @@ -0,0 +1,9 @@ +FROM ubuntu:14.04 +ADD . /fixture +RUN echo kerberos.build.elastic.co > /etc/hostname && echo "127.0.0.1 kerberos.build.elastic.co" >> /etc/hosts +RUN bash /fixture/src/main/resources/provision/installkdc.sh + +EXPOSE 88 +EXPOSE 88/udp + +CMD sleep infinity \ No newline at end of file diff --git a/test/fixtures/krb5kdc-fixture/Vagrantfile b/test/fixtures/krb5kdc-fixture/Vagrantfile deleted file mode 100644 index 72be4dad9cbe5..0000000000000 --- a/test/fixtures/krb5kdc-fixture/Vagrantfile +++ /dev/null @@ -1,53 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : - -# Licensed to Elasticsearch under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -# This Vagrantfile exists to define a virtual machine running MIT's Kerberos 5 -# for usage as a testing fixture for the build process. -# -# In order to connect to the KDC process on this virtual machine, find and use -# the rendered krb5.conf file in the build output directory (build/conf). -# -# In order to provision principals in the KDC, use the provided addprinc.sh -# script with vagrant's ssh facility: -# -# vagrant ssh -c /vagrant/src/main/resources/provision/addprinc.sh principal -# -# You will find the newly created principal's keytab file in the build output -# directory (build/keytabs). Principal creation is idempotent, and will recopy -# existing user keytabs from the KDC if they already exist. - -Vagrant.configure("2") do |config| - - config.vm.define "krb5kdc" do |config| - config.vm.box = "elastic/ubuntu-14.04-x86_64" - end - - config.vm.hostname = "kerberos.build.elastic.co" - - if Vagrant.has_plugin?("vagrant-cachier") - config.cache.scope = :box - end - - config.vm.network "forwarded_port", guest: 88, host: 60088, protocol: "tcp" - config.vm.network "forwarded_port", guest: 88, host: 60088, protocol: "udp" - - config.vm.provision "shell", path: "src/main/resources/provision/installkdc.sh" - -end diff --git a/test/fixtures/krb5kdc-fixture/build.gradle b/test/fixtures/krb5kdc-fixture/build.gradle index 685483d534771..a3ca8d41bc4d9 100644 --- a/test/fixtures/krb5kdc-fixture/build.gradle +++ b/test/fixtures/krb5kdc-fixture/build.gradle @@ -16,68 +16,38 @@ * specific language governing permissions and limitations * under the License. */ +apply plugin: 'elasticsearch.test.fixtures' -apply plugin: 'elasticsearch.build' - -Map vagrantEnvVars = [ - 'VAGRANT_CWD' : "${project.projectDir.absolutePath}", - 'VAGRANT_VAGRANTFILE' : 'Vagrantfile', - 'VAGRANT_PROJECT_DIR' : "${project.projectDir.absolutePath}" -] - -String box = "krb5kdc" - -List defaultPrincipals = [ "elasticsearch" ] - -task update(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'box' - subcommand 'update' - boxName box - environmentVars vagrantEnvVars +// installKDC uses tabs in it for the Kerberos ACL file. +// Ignore it for pattern checking. 
+forbiddenPatterns { + exclude "**/installkdc.sh" } -task up(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'up' - args '--provision', '--provider', 'virtualbox' - boxName box - environmentVars vagrantEnvVars - dependsOn update -} +List services = ["peppa", "hdfs"] -task addDefaultPrincipals { - dependsOn up +preProcessFixture.doLast { + // We need to create these up-front because if docker creates them they will be owned by root and we won't be + // able to clean them up + services.each { file("${buildDir}/shared/${it}").mkdirs() } } -for (String principal : defaultPrincipals) { - Task addTask = project.tasks.create("addPrincipal#${principal}", org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'ssh' - args '--command', "sudo bash /vagrant/src/main/resources/provision/addprinc.sh $principal" - boxName box - environmentVars vagrantEnvVars - dependsOn up +postProcessFixture { + inputs.dir("${buildDir}/shared") + services.each { service -> + File confTemplate = file("${buildDir}/shared/${service}/krb5.conf.template") + File confFile = file("${buildDir}/shared/${service}/krb5.conf") + outputs.file(confFile) + doLast { + assert confTemplate.exists() + String confContents = confTemplate.text + .replace("\${MAPPED_PORT}", "${ext."test.fixtures.${service}.udp.88"}") + confFile.text = confContents + } } - addDefaultPrincipals.dependsOn(addTask) } -task halt(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'halt' - boxName box - environmentVars vagrantEnvVars -} - -task destroy(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'destroy' - args '-f' - boxName box - environmentVars vagrantEnvVars - dependsOn halt -} +project.ext.krb5Conf = { service -> file("$buildDir/shared/${service}/krb5.conf") } +project.ext.krb5Keytabs = { service, fileName -> file("$buildDir/shared/${service}/keytabs/${fileName}") } -thirdPartyAudit.enabled = false unitTest.enabled = false - -// installKDC uses tabs in it for the Kerberos ACL file. -// Ignore it for pattern checking. -forbiddenPatterns { - exclude "**/installkdc.sh" -} diff --git a/test/fixtures/krb5kdc-fixture/docker-compose.yml b/test/fixtures/krb5kdc-fixture/docker-compose.yml new file mode 100644 index 0000000000000..4d018dd6c3e08 --- /dev/null +++ b/test/fixtures/krb5kdc-fixture/docker-compose.yml @@ -0,0 +1,24 @@ +version: '3' +services: + peppa: + hostname: kerberos.build.elastic.co + build: + context: . + dockerfile: Dockerfile + command: "bash /fixture/src/main/resources/provision/peppa.sh" + volumes: + - ./build/shared/peppa:/fixture/build + ports: + - "4444" + - "88/udp" + hdfs: + hostname: kerberos.build.elastic.co + build: + context: . 
+ dockerfile: Dockerfile + command: "bash /fixture/src/main/resources/provision/hdfs.sh" + volumes: + - ./build/shared/hdfs:/fixture/build + ports: + - "4444" + - "88/udp" diff --git a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/addprinc.sh b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/addprinc.sh index d0d1570ae299a..9fc2a0735d666 100755 --- a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/addprinc.sh +++ b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/addprinc.sh @@ -19,6 +19,9 @@ set -e +krb5kdc +kadmind + if [[ $# -lt 1 ]]; then echo 'Usage: addprinc.sh principalName [password]' echo ' principalName user principal name without realm' @@ -30,7 +33,7 @@ PRINC="$1" PASSWD="$2" USER=$(echo $PRINC | tr "/" "_") -VDIR=/vagrant +VDIR=/fixture RESOURCES=$VDIR/src/main/resources PROV_DIR=$RESOURCES/provision ENVPROP_FILE=$RESOURCES/env.properties @@ -64,3 +67,9 @@ else sudo kadmin -p $ADMIN_PRIN -kt $ADMIN_KTAB -q "addprinc -pw $PASSWD $PRINC" fi fi + +echo "Copying conf to local" +# make the configuration available externally +cp -v $LOCALSTATEDIR/krb5.conf $BUILD_DIR/krb5.conf.template +# We are running as root in the container, allow non root users running the container to be able to clean these up +chmod -R 777 $BUILD_DIR \ No newline at end of file diff --git a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/hdfs.sh b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/hdfs.sh new file mode 100644 index 0000000000000..ef5bba076444c --- /dev/null +++ b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/hdfs.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +set -e + +addprinc.sh "elasticsearch" +addprinc.sh "hdfs/hdfs.build.elastic.co" + +# Use this as a signal that setup is complete +python3 -m http.server 4444 & + +sleep infinity \ No newline at end of file diff --git a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/installkdc.sh b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/installkdc.sh index 2dc8ed92c9462..51af7984ce476 100755 --- a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/installkdc.sh +++ b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/installkdc.sh @@ -22,32 +22,15 @@ set -e # KDC installation steps and considerations based on https://web.mit.edu/kerberos/krb5-latest/doc/admin/install_kdc.html # and helpful input from https://help.ubuntu.com/community/Kerberos -VDIR=/vagrant +VDIR=/fixture RESOURCES=$VDIR/src/main/resources PROV_DIR=$RESOURCES/provision ENVPROP_FILE=$RESOURCES/env.properties -BUILD_DIR=$VDIR/build -CONF_DIR=$BUILD_DIR/conf -KEYTAB_DIR=$BUILD_DIR/keytabs LOCALSTATEDIR=/etc LOGDIR=/var/log/krb5 MARKER_FILE=/etc/marker -# Output location for our rendered configuration files and keytabs -mkdir -p $BUILD_DIR -rm -rf $BUILD_DIR/* -mkdir -p $CONF_DIR -mkdir -p $KEYTAB_DIR - -if [ -f $MARKER_FILE ]; then - echo "Already provisioned..." - echo "Recopying configuration files..." 
- cp $LOCALSTATEDIR/krb5.conf $CONF_DIR/krb5.conf - cp $LOCALSTATEDIR/krb5kdc/kdc.conf $CONF_DIR/kdc.conf - exit 0; -fi - # Pull environment information REALM_NAME=$(cat $ENVPROP_FILE | grep realm= | cut -d '=' -f 2) KDC_NAME=$(cat $ENVPROP_FILE | grep kdc= | cut -d '=' -f 2) @@ -60,7 +43,7 @@ sed -i 's/${REALM_NAME}/'$REALM_NAME'/g' $LOCALSTATEDIR/krb5.conf sed -i 's/${KDC_NAME}/'$KDC_NAME'/g' $LOCALSTATEDIR/krb5.conf sed -i 's/${BUILD_ZONE}/'$BUILD_ZONE'/g' $LOCALSTATEDIR/krb5.conf sed -i 's/${ELASTIC_ZONE}/'$ELASTIC_ZONE'/g' $LOCALSTATEDIR/krb5.conf -cp $LOCALSTATEDIR/krb5.conf $CONF_DIR/krb5.conf + # Transfer and interpolate the kdc.conf mkdir -p $LOCALSTATEDIR/krb5kdc @@ -69,7 +52,6 @@ sed -i 's/${REALM_NAME}/'$REALM_NAME'/g' $LOCALSTATEDIR/krb5kdc/kdc.conf sed -i 's/${KDC_NAME}/'$KDC_NAME'/g' $LOCALSTATEDIR/krb5kdc/kdc.conf sed -i 's/${BUILD_ZONE}/'$BUILD_ZONE'/g' $LOCALSTATEDIR/krb5kdc/kdc.conf sed -i 's/${ELASTIC_ZONE}/'$ELASTIC_ZONE'/g' $LOCALSTATEDIR/krb5.conf -cp $LOCALSTATEDIR/krb5kdc/kdc.conf $CONF_DIR/kdc.conf # Touch logging locations mkdir -p $LOGDIR @@ -112,9 +94,5 @@ EOF kadmin.local -q "addprinc -pw elastic admin/admin@$REALM_NAME" kadmin.local -q "ktadd -k /etc/admin.keytab admin/admin@$REALM_NAME" -# Start Kerberos Services -krb5kdc -kadmind - -# Mark that the vm is already provisioned -touch $MARKER_FILE \ No newline at end of file +# Create a link so addprinc.sh is on path +ln -s $PROV_DIR/addprinc.sh /usr/bin/ \ No newline at end of file diff --git a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template index e572c12e70957..9504b49bc7301 100644 --- a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template +++ b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/krb5.conf.template @@ -32,12 +32,8 @@ [realms] ${REALM_NAME} = { - kdc = ${KDC_NAME}:88 - kdc = ${KDC_NAME}:60088 - kdc = localhost:60088 - kdc = localhost:88 - kdc = 127.0.0.1:60088 kdc = 127.0.0.1:88 + kdc = 127.0.0.1:${MAPPED_PORT} admin_server = ${KDC_NAME}:749 default_domain = ${BUILD_ZONE} } diff --git a/test/fixtures/krb5kdc-fixture/src/main/resources/provision/peppa.sh b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/peppa.sh new file mode 100644 index 0000000000000..815a9e94e8cb5 --- /dev/null +++ b/test/fixtures/krb5kdc-fixture/src/main/resources/provision/peppa.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +set -e + +addprinc.sh elasticsearch +addprinc.sh HTTP/localhost +addprinc.sh peppa +addprinc.sh george dino + +# Use this as a signal that setup is complete +python3 -m http.server 4444 & + +sleep infinity \ No newline at end of file diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index c97d289215452..7fb2d50302c11 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -1052,7 +1052,7 @@ public static List readAllOperationsInLucene(Engine engine, * Asserts the provided engine has a consistent document history between translog and Lucene index. 
*/ public static void assertConsistentHistoryBetweenTranslogAndLuceneIndex(Engine engine, MapperService mapper) throws IOException { - if (mapper.documentMapper() == null || engine.config().getIndexSettings().isSoftDeleteEnabled() == false + if (mapper == null || mapper.documentMapper() == null || engine.config().getIndexSettings().isSoftDeleteEnabled() == false || (engine instanceof InternalEngine) == false) { return; } diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java index 10a61a748cd3f..86f012fbd6583 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalTestCluster.java @@ -631,13 +631,9 @@ private Settings getNodeSettings(final int nodeId, final long seed, final Settin .put("node.name", name) .put(NodeEnvironment.NODE_ID_SEED_SETTING.getKey(), seed); - final String discoveryType = DISCOVERY_TYPE_SETTING.get(updatedSettings.build()); - final boolean usingSingleNodeDiscovery = discoveryType.equals("single-node"); - if (usingSingleNodeDiscovery == false) { - if (autoManageMinMasterNodes) { - assertThat("automatically managing min master nodes require nodes to complete a join cycle when starting", - updatedSettings.get(INITIAL_STATE_TIMEOUT_SETTING.getKey()), nullValue()); - } + if (autoManageMinMasterNodes) { + assertThat("automatically managing min master nodes require nodes to complete a join cycle when starting", + updatedSettings.get(INITIAL_STATE_TIMEOUT_SETTING.getKey()), nullValue()); } return updatedSettings.build(); @@ -1160,7 +1156,7 @@ private synchronized void reset(boolean wipeData) throws IOException { nextNodeId.set(newSize); assert size() == newSize; - if (newSize > 0) { + if (autoManageMinMasterNodes && newSize > 0) { validateClusterFormed(); } logger.debug("Cluster is consistent again - nodes: [{}] nextNodeId: [{}] numSharedNodes: [{}]", diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java index fea1c3997530c..bdcf426d118f3 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/Features.java @@ -47,7 +47,8 @@ public final class Features { "warnings", "yaml", "contains", - "transform_and_set" + "transform_and_set", + "arbitrary_key" )); private Features() { diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ObjectPath.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ObjectPath.java index 8ebeca4233abd..36d1ff04a5596 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ObjectPath.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/ObjectPath.java @@ -102,7 +102,17 @@ private Object evaluate(String key, Object object, Stash stash) throws IOExcepti } if (object instanceof Map) { - return ((Map) object).get(key); + final Map objectAsMap = (Map) object; + if ("_arbitrary_key_".equals(key)) { + if (objectAsMap.isEmpty()) { + throw new IllegalArgumentException("requested [" + key + "] but the map was empty"); + } + if (objectAsMap.containsKey(key)) { + throw new IllegalArgumentException("requested meta-key [" + key + "] but the map unexpectedly contains this key"); + } + return objectAsMap.keySet().iterator().next(); + } + return objectAsMap.get(key); } if (object 
instanceof List) { List list = (List) object; @@ -149,7 +159,7 @@ private String[] parsePath(String path) { list.add(current.toString()); } - return list.toArray(new String[list.size()]); + return list.toArray(new String[0]); } /** diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index ea12dcedf1643..954d268fe3a65 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -993,6 +993,7 @@ public void handleException(TransportException exp) { } @TestLogging(value = "org.elasticsearch.transport.TransportService.tracer:trace") + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/40586") public void testTracerLog() throws Exception { TransportRequestHandler handler = (request, channel, task) -> channel.sendResponse(new StringMessageResponse("")); TransportRequestHandler handlerWithError = (request, channel, task) -> { diff --git a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ObjectPathTests.java b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ObjectPathTests.java index 79d6d42092a85..9345d73733076 100644 --- a/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ObjectPathTests.java +++ b/test/framework/src/test/java/org/elasticsearch/test/rest/yaml/ObjectPathTests.java @@ -34,6 +34,7 @@ import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.isOneOf; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -181,6 +182,56 @@ public void testEvaluateObjectKeys() throws Exception { assertThat(strings, contains("template_1", "template_2")); } + public void testEvaluateArbitraryKey() throws Exception { + XContentBuilder xContentBuilder = randomXContentBuilder(); + xContentBuilder.startObject(); + xContentBuilder.startObject("metadata"); + xContentBuilder.startObject("templates"); + xContentBuilder.startObject("template_1"); + xContentBuilder.field("field1", "value"); + xContentBuilder.endObject(); + xContentBuilder.startObject("template_2"); + xContentBuilder.field("field2", "value"); + xContentBuilder.field("field3", "value"); + xContentBuilder.endObject(); + xContentBuilder.startObject("template_3"); + xContentBuilder.endObject(); + xContentBuilder.startObject("template_4"); + xContentBuilder.field("_arbitrary_key_", "value"); + xContentBuilder.endObject(); + xContentBuilder.endObject(); + xContentBuilder.endObject(); + xContentBuilder.endObject(); + ObjectPath objectPath = ObjectPath.createFromXContent(xContentBuilder.contentType().xContent(), + BytesReference.bytes(xContentBuilder)); + + { + final Object object = objectPath.evaluate("metadata.templates.template_1._arbitrary_key_"); + assertThat(object, instanceOf(String.class)); + final String key = (String) object; + assertThat(key, equalTo("field1")); + } + + { + final Object object = objectPath.evaluate("metadata.templates.template_2._arbitrary_key_"); + assertThat(object, instanceOf(String.class)); + final String key = (String) object; + assertThat(key, isOneOf("field2", "field3")); + } + + { + final IllegalArgumentException exception + = expectThrows(IllegalArgumentException.class, () -> 
objectPath.evaluate("metadata.templates.template_3._arbitrary_key_")); + assertThat(exception.getMessage(), equalTo("requested [_arbitrary_key_] but the map was empty")); + } + + { + final IllegalArgumentException exception + = expectThrows(IllegalArgumentException.class, () -> objectPath.evaluate("metadata.templates.template_4._arbitrary_key_")); + assertThat(exception.getMessage(), equalTo("requested meta-key [_arbitrary_key_] but the map unexpectedly contains this key")); + } + } + public void testEvaluateStashInPropertyName() throws Exception { XContentBuilder xContentBuilder = randomXContentBuilder(); xContentBuilder.startObject(); diff --git a/x-pack/docs/en/rest-api/security/create-role-mappings.asciidoc b/x-pack/docs/en/rest-api/security/create-role-mappings.asciidoc index de2ad5af3081b..bfd6a14d3ed7c 100644 --- a/x-pack/docs/en/rest-api/security/create-role-mappings.asciidoc +++ b/x-pack/docs/en/rest-api/security/create-role-mappings.asciidoc @@ -50,15 +50,46 @@ mapping is performed. user. Within the `metadata` object, keys beginning with `_` are reserved for system usage. -`roles` (required):: -(list) A list of roles that are granted to the users that match the role mapping -rules. +`roles`:: +(list of strings) A list of role names that are granted to the users that match +the role mapping rules. +_Exactly one of `roles` or `role_templates` must be specified_. + +`role_templates`:: +(list of objects) A list of mustache templates that will be evaluated to +determine the roles names that should granted to the users that match the role +mapping rules. +The format of these objects is defined below. +_Exactly one of `roles` or `role_templates` must be specified_. `rules` (required):: (object) The rules that determine which users should be matched by the mapping. A rule is a logical condition that is expressed by using a JSON DSL. See <>. +==== Role Templates + +The most common use for role mappings is to create a mapping from a known value +on the user to a fixed role name. +For example, all users in the `cn=admin,dc=example,dc=com` LDAP group should be +given the `superuser` role in {es}. +The `roles` field is used for this purpose. + +For more complex needs it is possible to use Mustache templates to dynamically +determine the names of the roles that should be granted to the user. +The `role_templates` field is used for this purpose. + +All of the <> that are available in the +role mapping `rules` are also available in the role templates. Thus it is possible +to assign a user to a role that reflects their `username`, their `groups` or the +name of the `realm` to which they authenticated. + +By default a template is evaluated to produce a single string that is the name +of the role which should be assigned to the user. If the `format` of the template +is set to `"json"` then the template is expected to produce a JSON string, or an +array of JSON strings for the role name(s). + +The Examples section below demonstrates the use of templated role names. 
==== Authorization @@ -117,12 +148,26 @@ POST /_security/role_mapping/mapping2 -------------------------------------------------- // CONSOLE +The following example matches users who authenticated against a specific realm: +[source, js] +------------------------------------------------------------ +POST /_security/role_mapping/mapping3 +{ + "roles": [ "ldap-user" ], + "enabled": true, + "rules": { + "field" : { "realm.name" : "ldap1" } + } +} +------------------------------------------------------------ +// CONSOLE + The following example matches any user where either the username is `esadmin` or the user is in the `cn=admin,dc=example,dc=com` group: [source, js] ------------------------------------------------------------ -POST /_security/role_mapping/mapping3 +POST /_security/role_mapping/mapping4 { "roles": [ "superuser" ], "enabled": true, @@ -144,25 +189,52 @@ POST /_security/role_mapping/mapping3 ------------------------------------------------------------ // CONSOLE -The following example matches users who authenticated against a specific realm: +The example above is useful when the group names in your identity management +system (such as Active Directory, or a SAML Identity Provider) do not have a +1-to-1 correspondence with the names of roles in {es}. The role mapping is the +means by which you link a _group name_ with a _role name_. + +However, in rare cases the names of your groups may be an exact match for the +names of your {es} roles. This can be the case when your SAML Identity Provider +includes its own "group mapping" feature and can be configured to release {es} +role names in the user's SAML attributes. + +In these cases it is possible to use a template that treats the group names as +role names. + +*Note*: This should only be done if you intend to define roles for all of the +provided groups. Mapping a user to a large number of unnecessary or undefined +roles is inefficient and can have a negative effect on system performance. +If you only need to map a subset of the groups, then you should do this +using explicit mappings. + [source, js] ------------------------------------------------------------ -POST /_security/role_mapping/mapping4 +POST /_security/role_mapping/mapping5 { - "roles": [ "ldap-user" ], - "enabled": true, + "role_templates": [ + { + "template": { "source": "{{#tojson}}groups{{/tojson}}" }, <1> + "format" : "json" <2> + } + ], "rules": { - "field" : { "realm.name" : "ldap1" } - } + "field" : { "realm.name" : "saml1" } + }, + "enabled": true } ------------------------------------------------------------ // CONSOLE +<1> The `tojson` mustache function is used to convert the list of + group names into a valid JSON array. +<2> Because the template produces a JSON array, the format must be + set to `json`. 
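To make the evaluation concrete, consider a hypothetical user; the group names below are invented for illustration, and each would need a matching role definition:

[source, js]
------------------------------------------------------------
// Groups released by the SAML Identity Provider for the user
"groups": [ "kibana_admin", "monitoring_user" ]

// Result of evaluating the {{#tojson}}groups{{/tojson}} template with "format": "json"
[ "kibana_admin", "monitoring_user" ]
------------------------------------------------------------

The user would therefore be granted the `kibana_admin` and `monitoring_user` roles,
which is why this approach should only be used when every released group name has a
corresponding role definition.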
The following example matches users within a specific LDAP sub-tree: [source, js] ------------------------------------------------------------ -POST /_security/role_mapping/mapping5 +POST /_security/role_mapping/mapping6 { "roles": [ "example-user" ], "enabled": true, @@ -178,7 +250,7 @@ specific realm: [source, js] ------------------------------------------------------------ -POST /_security/role_mapping/mapping6 +POST /_security/role_mapping/mapping7 { "roles": [ "ldap-example-user" ], "enabled": true, @@ -203,7 +275,7 @@ following mapping matches any user where *all* of these conditions are met: [source, js] ------------------------------------------------------------ -POST /_security/role_mapping/mapping7 +POST /_security/role_mapping/mapping8 { "roles": [ "superuser" ], "enabled": true, @@ -240,3 +312,32 @@ POST /_security/role_mapping/mapping7 } ------------------------------------------------------------ // CONSOLE + +A templated role can be used to automatically map every user to their own +custom role. The role itself can be defined through the +<> or using a +{stack-ov}/custom-roles-authorization.html#implementing-custom-roles-provider[custom roles provider]. + +In this example every user who authenticates using the "cloud-saml" realm +will be automatically mapped to two roles - the `"saml_user"` role and a +role that is their username prefixed with `_user_`. +As an example, the user `nwong` would be assigned the `saml_user` and +`_user_nwong` roles. + +[source, js] +------------------------------------------------------------ +POST /_security/role_mapping/mapping9 +{ + "rules": { "field": { "realm.name": "cloud-saml" } }, + "role_templates": [ + { "template": { "source" : "saml_user" } }, <1> + { "template": { "source" : "_user_{{username}}" } } + ], + "enabled": true +} +------------------------------------------------------------ +// CONSOLE +<1> Because it is not possible to specify both `roles` and `role_templates` in + the same role mapping, we can apply a "fixed name" role by using a template + that has no substitutions. 
+ diff --git a/x-pack/docs/en/watcher/actions/email.asciidoc b/x-pack/docs/en/watcher/actions/email.asciidoc index 61b0fa8cf9e7f..1f8de38319100 100644 --- a/x-pack/docs/en/watcher/actions/email.asciidoc +++ b/x-pack/docs/en/watcher/actions/email.asciidoc @@ -325,7 +325,7 @@ In order to store the account SMTP password, use the keystore command [source,yaml] -------------------------------------------------- -bin/elasticsearch-keystore xpack.notification.email.account.gmail_account.smtp.secure_password +bin/elasticsearch-keystore add xpack.notification.email.account.gmail_account.smtp.secure_password -------------------------------------------------- If you get an authentication error that indicates that you need to continue the @@ -363,7 +363,7 @@ In order to store the account SMTP password, use the keystore command [source,yaml] -------------------------------------------------- -bin/elasticsearch-keystore xpack.notification.email.account.outlook_account.smtp.secure_password +bin/elasticsearch-keystore add xpack.notification.email.account.outlook_account.smtp.secure_password -------------------------------------------------- @@ -400,7 +400,7 @@ In order to store the account SMTP password, use the keystore command [source,yaml] -------------------------------------------------- -bin/elasticsearch-keystore xpack.notification.email.account.ses_account.smtp.secure_password +bin/elasticsearch-keystore add xpack.notification.email.account.ses_account.smtp.secure_password -------------------------------------------------- NOTE: You need to use your Amazon SES SMTP credentials to send email through diff --git a/x-pack/docs/en/watcher/actions/jira.asciidoc b/x-pack/docs/en/watcher/actions/jira.asciidoc index f0b9c714181b8..4608ee6ab1af5 100644 --- a/x-pack/docs/en/watcher/actions/jira.asciidoc +++ b/x-pack/docs/en/watcher/actions/jira.asciidoc @@ -109,12 +109,15 @@ Jira account you need to specify (see {ref}/secure-settings.html[secure settings [source,yaml] -------------------------------------------------- -bin/elasticsearch-keystore xpack.notification.jira.account.monitoring.secure_url -bin/elasticsearch-keystore xpack.notification.jira.account.monitoring.secure_user -bin/elasticsearch-keystore xpack.notification.jira.account.monitoring.secure_password +bin/elasticsearch-keystore add xpack.notification.jira.account.monitoring.secure_url +bin/elasticsearch-keystore add xpack.notification.jira.account.monitoring.secure_user +bin/elasticsearch-keystore add xpack.notification.jira.account.monitoring.secure_password -------------------------------------------------- -deprecated[The insecure way of storing sensitive data (`url`, `user` and `password`) in the configuration file or the cluster settings is deprecated] +[WARNING] +====== +Storing sensitive data (`url`, `user` and `password`) in the configuration file or the cluster settings is insecure and has been deprecated. Please use {es}'s secure {ref}/secure-settings.html[keystore] method instead. 
+====== To avoid credentials that transit in clear text over the network, {watcher} will reject `url` settings like `http://internal-jira.elastic.co` that are based on diff --git a/x-pack/docs/en/watcher/actions/slack.asciidoc b/x-pack/docs/en/watcher/actions/slack.asciidoc index ef8b907677b8c..0e9177c604d05 100644 --- a/x-pack/docs/en/watcher/actions/slack.asciidoc +++ b/x-pack/docs/en/watcher/actions/slack.asciidoc @@ -196,16 +196,20 @@ image::images/slack-add-webhook-integration.jpg[] image::images/slack-copy-webhook-url.jpg[] To configure a Slack account, at a minimum you need to specify the account -name and webhook URL in the elasticsearch keystore (se {ref}/secure-settings.html[secure settings]): +name and webhook URL in the {es} keystore (see {ref}/secure-settings.html[secure settings]): [source,shell] -------------------------------------------------- bin/elasticsearch-keystore add xpack.notification.slack.account.monitoring.secure_url -------------------------------------------------- -deprecated[You can also configure this via settings in the `elasticsearch.yml` file, using the keystore is the preferred and secure way of doing this] +[WARNING] +====== +You can no longer configure Slack accounts using `elasticsearch.yml` settings. +Please use {es}'s secure {ref}/secure-settings.html[keystore] method instead. +====== -You can also specify defaults for the {ref}/notification-settings.html#slack-account-attributes[Slack +You can specify defaults for the {ref}/notification-settings.html#slack-account-attributes[Slack notification attributes]: [source,yaml] diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowActionTests.java index b9a3c7ed021f4..44f8583bb9b5a 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportResumeFollowActionTests.java @@ -238,7 +238,7 @@ public void testDynamicIndexSettingsAreClassified() { if (setting.isDynamic()) { boolean notReplicated = TransportResumeFollowAction.NON_REPLICATED_SETTINGS.contains(setting); boolean replicated = replicatedSettings.contains(setting); - assertThat("setting [" + setting.getKey() + "] is not classified as replicated xor not replicated", + assertThat("setting [" + setting.getKey() + "] is not classified as replicated or not replicated", notReplicated ^ replicated, is(true)); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java index 196982c0a35fb..c1a682757d140 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java @@ -96,10 +96,26 @@ public GraphExploreRequest indicesOptions(IndicesOptions indicesOptions) { return this; } + /** + * The document types to execute the explore against. Defaults to be executed against + * all types. + * + * @deprecated Types are in the process of being removed. Instead of using a type, prefer to + * filter on a field on the document. + */ + @Deprecated public String[] types() { return this.types; } + /** + * The document types to execute the explore request against. 
Defaults to be executed against + * all types. + * + * @deprecated Types are in the process of being removed. Instead of using a type, prefer to + * filter on a field on the document. + */ + @Deprecated public GraphExploreRequest types(String... types) { this.types = types; return this; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index a745215fa5533..dc8403b7bd548 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -57,9 +57,9 @@ import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction; import org.elasticsearch.xpack.core.indexlifecycle.LifecycleType; -import org.elasticsearch.xpack.core.indexlifecycle.SetPriorityAction; import org.elasticsearch.xpack.core.indexlifecycle.ReadOnlyAction; import org.elasticsearch.xpack.core.indexlifecycle.RolloverAction; +import org.elasticsearch.xpack.core.indexlifecycle.SetPriorityAction; import org.elasticsearch.xpack.core.indexlifecycle.ShrinkAction; import org.elasticsearch.xpack.core.indexlifecycle.TimeseriesLifecycleType; import org.elasticsearch.xpack.core.indexlifecycle.UnfollowAction; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameField.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameField.java index fbd81e438a130..73e639cec5e1d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameField.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/DataFrameField.java @@ -53,6 +53,9 @@ public final class DataFrameField { */ public static final String FOR_INTERNAL_STORAGE = "for_internal_storage"; + // internal document id + public static String DOCUMENT_ID_FIELD = "_id"; + private DataFrameField() { } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformAction.java index 13e62da090c3e..0316153fbc822 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/DeleteDataFrameTransformAction.java @@ -6,17 +6,14 @@ package org.elasticsearch.xpack.core.dataframe.action; import org.elasticsearch.action.Action; -import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.support.tasks.BaseTasksRequest; import org.elasticsearch.action.support.tasks.BaseTasksResponse; -import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.tasks.Task; @@ -42,16 +39,13 @@ public Response 
newResponse() { return new Response(); } - public static class Request extends BaseTasksRequest implements ToXContentFragment { + public static class Request extends BaseTasksRequest { private String id; public Request(String id) { this.id = ExceptionsHelper.requireNonNull(id, DataFrameField.ID.getPreferredName()); } - private Request() { - } - public Request(StreamInput in) throws IOException { super(in); id = in.readString(); @@ -77,12 +71,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(DataFrameField.ID.getPreferredName(), id); - return builder; - } - @Override public int hashCode() { return Objects.hash(id); @@ -102,14 +90,6 @@ public boolean equals(Object obj) { } } - public static class RequestBuilder - extends ActionRequestBuilder { - - protected RequestBuilder(ElasticsearchClient client, DeleteDataFrameTransformAction action) { - super(client, action, new DeleteDataFrameTransformAction.Request()); - } - } - public static class Response extends BaseTasksResponse implements Writeable, ToXContentObject { private boolean acknowledged; public Response(StreamInput in) throws IOException { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsAction.java index ca6bf9d16e62f..ac1498c72a6da 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsAction.java @@ -8,14 +8,11 @@ import org.apache.logging.log4j.LogManager; import org.elasticsearch.action.Action; -import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.action.AbstractGetResourcesRequest; @@ -48,7 +45,7 @@ public Response newResponse() { return new Response(); } - public static class Request extends AbstractGetResourcesRequest implements ToXContent { + public static class Request extends AbstractGetResourcesRequest { private static final int MAX_SIZE_RETURN = 1000; @@ -78,25 +75,12 @@ public ActionRequestValidationException validate() { return exception; } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(DataFrameField.ID.getPreferredName(), getResourceId()); - return builder; - } - @Override public String getResourceIdField() { return DataFrameField.ID.getPreferredName(); } } - public static class RequestBuilder extends ActionRequestBuilder { - - protected RequestBuilder(ElasticsearchClient client, GetDataFrameTransformsAction action) { - super(client, action, new Request()); - } - } - public static class Response extends AbstractGetResourcesResponse implements Writeable, ToXContentObject { public static final String 
INVALID_TRANSFORMS_DEPRECATION_WARNING = "Found [{}] invalid transforms"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsAction.java index 47e922033072b..f0e92aa36db2f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/GetDataFrameTransformsStatsAction.java @@ -7,19 +7,16 @@ package org.elasticsearch.xpack.core.dataframe.action; import org.elasticsearch.action.Action; -import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.support.tasks.BaseTasksRequest; import org.elasticsearch.action.support.tasks.BaseTasksResponse; -import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.tasks.Task; @@ -44,7 +41,7 @@ public Response newResponse() { return new Response(); } - public static class Request extends BaseTasksRequest implements ToXContent { + public static class Request extends BaseTasksRequest { private String id; public Request(String id) { @@ -55,8 +52,6 @@ public Request(String id) { } } - private Request() {} - public Request(StreamInput in) throws IOException { super(in); id = in.readString(); @@ -87,12 +82,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(DataFrameField.ID.getPreferredName(), id); - return builder; - } - @Override public int hashCode() { return Objects.hash(id); @@ -111,13 +100,6 @@ public boolean equals(Object obj) { } } - public static class RequestBuilder extends ActionRequestBuilder { - - protected RequestBuilder(ElasticsearchClient client, GetDataFrameTransformsStatsAction action) { - super(client, action, new Request()); - } - } - public static class Response extends BaseTasksResponse implements Writeable, ToXContentObject { private List transformsStateAndStats; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformAction.java index 6c226003f663a..51b5e0d4ec1d6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/PutDataFrameTransformAction.java @@ -10,8 +10,6 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import 
org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; -import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContentObject; @@ -100,13 +98,6 @@ public boolean equals(Object obj) { } } - public static class RequestBuilder extends MasterNodeOperationRequestBuilder { - - protected RequestBuilder(ElasticsearchClient client, PutDataFrameTransformAction action) { - super(client, action, new Request()); - } - } - public static class Response extends AcknowledgedResponse { public Response() { super(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformAction.java index 161c4d7d2587a..b86a2339faa47 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformAction.java @@ -7,15 +7,12 @@ package org.elasticsearch.xpack.core.dataframe.action; import org.elasticsearch.action.Action; -import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.tasks.BaseTasksResponse; -import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.dataframe.DataFrameField; @@ -39,7 +36,7 @@ public Response newResponse() { return new Response(); } - public static class Request extends AcknowledgedRequest implements ToXContent { + public static class Request extends AcknowledgedRequest { private String id; private boolean force; @@ -76,12 +73,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(DataFrameField.ID.getPreferredName(), id); - return builder; - } - @Override public int hashCode() { return Objects.hash(id); @@ -100,13 +91,6 @@ public boolean equals(Object obj) { } } - public static class RequestBuilder extends ActionRequestBuilder { - - protected RequestBuilder(ElasticsearchClient client, StartDataFrameTransformAction action) { - super(client, action, new Request()); - } - } - public static class Response extends BaseTasksResponse implements Writeable, ToXContentObject { private boolean started; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskAction.java index a51b9243c3d44..d3c96fb9cf171 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskAction.java @@ 
-7,15 +7,12 @@ package org.elasticsearch.xpack.core.dataframe.action; import org.elasticsearch.action.Action; -import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.tasks.BaseTasksRequest; import org.elasticsearch.action.support.tasks.BaseTasksResponse; -import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.xpack.core.dataframe.DataFrameField; @@ -39,7 +36,7 @@ public Response newResponse() { return new Response(); } - public static class Request extends BaseTasksRequest implements ToXContent { + public static class Request extends BaseTasksRequest { private String id; @@ -70,12 +67,6 @@ public ActionRequestValidationException validate() { return null; } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(DataFrameField.ID.getPreferredName(), id); - return builder; - } - @Override public int hashCode() { return Objects.hash(id); @@ -94,13 +85,6 @@ public boolean equals(Object obj) { } } - public static class RequestBuilder extends ActionRequestBuilder { - - protected RequestBuilder(ElasticsearchClient client, StartDataFrameTransformTaskAction action) { - super(client, action, new Request()); - } - } - public static class Response extends BaseTasksResponse implements Writeable, ToXContentObject { private boolean started; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java index 2fb14ddd013b6..7fa437bd15606 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/dataframe/action/StopDataFrameTransformAction.java @@ -6,17 +6,14 @@ package org.elasticsearch.xpack.core.dataframe.action; import org.elasticsearch.action.Action; -import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.tasks.BaseTasksRequest; import org.elasticsearch.action.support.tasks.BaseTasksResponse; -import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.tasks.Task; @@ -44,7 +41,7 @@ public Response newResponse() { return new Response(); } - public static class Request extends BaseTasksRequest implements ToXContent { + public static class Request extends BaseTasksRequest { private String id; private final boolean waitForCompletion; private final boolean force; @@ -98,17 +95,6 @@ public ActionRequestValidationException validate() { 
return null; } - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(DataFrameField.ID.getPreferredName(), id); - builder.field(DataFrameField.WAIT_FOR_COMPLETION.getPreferredName(), waitForCompletion); - builder.field(DataFrameField.FORCE.getPreferredName(), force); - if (this.getTimeout() != null) { - builder.field(DataFrameField.TIMEOUT.getPreferredName(), this.getTimeout()); - } - return builder; - } - @Override public int hashCode() { // the base class does not implement hashCode, therefore we need to hash timeout ourselves @@ -144,13 +130,6 @@ public boolean match(Task task) { } } - public static class RequestBuilder extends ActionRequestBuilder { - - protected RequestBuilder(ElasticsearchClient client, StopDataFrameTransformAction action) { - super(client, action, new Request()); - } - } - public static class Response extends BaseTasksResponse implements Writeable, ToXContentObject { private boolean stopped; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleMetadata.java index 7dce5e85ab75d..c0cafa8e9079e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/IndexLifecycleMetadata.java @@ -106,7 +106,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public Version getMinimalSupportedVersion() { - return Version.V_7_0_0; + return Version.V_6_6_0; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForNoFollowersStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForNoFollowersStep.java index 3cfaeba048d5f..958120b99b879 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForNoFollowersStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForNoFollowersStep.java @@ -20,7 +20,9 @@ import java.io.IOException; import java.util.Arrays; +import java.util.Collection; import java.util.Objects; +import java.util.Optional; /** * A step that waits until the index it's used on is no longer a leader index. 
@@ -57,8 +59,11 @@ public void evaluateCondition(IndexMetaData indexMetaData, Listener listener) { boolean isCurrentlyLeaderIndex = Arrays.stream(indexStats.getShards()) .map(ShardStats::getRetentionLeaseStats) - .flatMap(retentionLeaseStats -> retentionLeaseStats.retentionLeases().leases().stream()) - .anyMatch(lease -> CCR_LEASE_KEY.equals(lease.source())); + .map(Optional::ofNullable) + .map(o -> o.flatMap(stats -> Optional.ofNullable(stats.retentionLeases()))) + .map(o -> o.flatMap(leases -> Optional.ofNullable(leases.leases()))) + .map(o -> o.map(Collection::stream)) + .anyMatch(lease -> lease.isPresent() && lease.get().anyMatch(l -> CCR_LEASE_KEY.equals(l.source()))); if (isCurrentlyLeaderIndex) { listener.onResponse(false, new Info()); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java index 087e29ec8b56a..168adaa111658 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java @@ -5,12 +5,14 @@ */ package org.elasticsearch.xpack.core.security.action.rolemapping; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.core.security.authc.support.mapper.TemplateRoleName; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.ExpressionParser; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.RoleMapperExpression; import org.elasticsearch.xpack.core.security.support.MetadataUtils; @@ -35,6 +37,7 @@ public class PutRoleMappingRequest extends ActionRequest private String name = null; private boolean enabled = true; private List roles = Collections.emptyList(); + private List roleTemplates = Collections.emptyList(); private RoleMapperExpression rules = null; private Map metadata = Collections.emptyMap(); private RefreshPolicy refreshPolicy = RefreshPolicy.IMMEDIATE; @@ -46,20 +49,20 @@ public PutRoleMappingRequest() { public ActionRequestValidationException validate() { ActionRequestValidationException validationException = null; if (name == null) { - validationException = addValidationError("role-mapping name is missing", - validationException); + validationException = addValidationError("role-mapping name is missing", validationException); } - if (roles.isEmpty()) { - validationException = addValidationError("role-mapping roles are missing", - validationException); + if (roles.isEmpty() && roleTemplates.isEmpty()) { + validationException = addValidationError("role-mapping roles or role-templates are missing", validationException); + } + if (roles.size() > 0 && roleTemplates.size() > 0) { + validationException = addValidationError("role-mapping cannot have both roles and role-templates", validationException); } if (rules == null) { - validationException = addValidationError("role-mapping rules are missing", - validationException); + validationException = 
addValidationError("role-mapping rules are missing", validationException); } if (MetadataUtils.containsReservedMetadata(metadata)) { - validationException = addValidationError("metadata keys may not start with [" + - MetadataUtils.RESERVED_PREFIX + "]", validationException); + validationException = addValidationError("metadata keys may not start with [" + MetadataUtils.RESERVED_PREFIX + "]", + validationException); } return validationException; } @@ -84,10 +87,18 @@ public List getRoles() { return Collections.unmodifiableList(roles); } + public List getRoleTemplates() { + return Collections.unmodifiableList(roleTemplates); + } + public void setRoles(List roles) { this.roles = new ArrayList<>(roles); } + public void setRoleTemplates(List templates) { + this.roleTemplates = new ArrayList<>(templates); + } + public RoleMapperExpression getRules() { return rules; } @@ -126,6 +137,9 @@ public void readFrom(StreamInput in) throws IOException { this.name = in.readString(); this.enabled = in.readBoolean(); this.roles = in.readStringList(); + if (in.getVersion().onOrAfter(Version.V_8_0_0)) { + this.roleTemplates = in.readList(TemplateRoleName::new); + } this.rules = ExpressionParser.readExpression(in); this.metadata = in.readMap(); this.refreshPolicy = RefreshPolicy.readFrom(in); @@ -137,6 +151,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeBoolean(enabled); out.writeStringCollection(roles); + if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + out.writeList(roleTemplates); + } ExpressionParser.writeExpression(rules, out); out.writeMap(metadata); refreshPolicy.writeTo(out); @@ -147,6 +164,7 @@ public ExpressionRoleMapping getMapping() { name, rules, roles, + roleTemplates, metadata, enabled ); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequestBuilder.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequestBuilder.java index c74952e9dfd09..14f722d169410 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequestBuilder.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequestBuilder.java @@ -5,18 +5,19 @@ */ package org.elasticsearch.xpack.core.security.action.rolemapping; -import java.io.IOException; -import java.util.Arrays; -import java.util.Map; - import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.support.WriteRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.core.security.authc.support.mapper.TemplateRoleName; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.RoleMapperExpression; +import java.io.IOException; +import java.util.Arrays; +import java.util.Map; + /** * Builder for requests to add/update a role-mapping to the native store * @@ -38,6 +39,7 @@ public PutRoleMappingRequestBuilder source(String name, BytesReference source, request.setName(name); request.setEnabled(mapping.isEnabled()); request.setRoles(mapping.getRoles()); + request.setRoleTemplates(mapping.getRoleTemplates()); request.setRules(mapping.getExpression()); request.setMetadata(mapping.getMetadata()); 
return this; @@ -52,6 +54,10 @@ public PutRoleMappingRequestBuilder roles(String... roles) { request.setRoles(Arrays.asList(roles)); return this; } + public PutRoleMappingRequestBuilder roleTemplates(TemplateRoleName... templates) { + request.setRoleTemplates(Arrays.asList(templates)); + return this; + } public PutRoleMappingRequestBuilder expression(RoleMapperExpression expression) { request.setRules(expression); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java index 95d1e9fa77149..dd5fb08fa14b7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.core.security.authc.support.mapper; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; @@ -15,20 +16,28 @@ import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser.ValueType; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.ExpressionModel; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.ExpressionParser; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.RoleMapperExpression; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import java.io.IOException; import java.io.InputStream; +import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Objects; +import java.util.Set; import java.util.function.BiConsumer; +import java.util.stream.Collectors; +import java.util.stream.Stream; /** * A representation of a single role-mapping for use in NativeRoleMappingStore. 
@@ -50,27 +59,30 @@ public class ExpressionRoleMapping implements ToXContentObject, Writeable { static { PARSER.declareStringArray(Builder::roles, Fields.ROLES); - PARSER.declareField(Builder::rules, ExpressionParser::parseObject, Fields.RULES, ObjectParser.ValueType.OBJECT); - PARSER.declareField(Builder::metadata, XContentParser::map, Fields.METADATA, ObjectParser.ValueType.OBJECT); + PARSER.declareObjectArray(Builder::roleTemplates, (parser, ctx) -> TemplateRoleName.parse(parser), Fields.ROLE_TEMPLATES); + PARSER.declareField(Builder::rules, ExpressionParser::parseObject, Fields.RULES, ValueType.OBJECT); + PARSER.declareField(Builder::metadata, XContentParser::map, Fields.METADATA, ValueType.OBJECT); PARSER.declareBoolean(Builder::enabled, Fields.ENABLED); BiConsumer ignored = (b, v) -> { }; // skip the doc_type and type fields in case we're parsing directly from the index PARSER.declareString(ignored, new ParseField(NativeRoleMappingStoreField.DOC_TYPE_FIELD)); PARSER.declareString(ignored, new ParseField(UPGRADE_API_TYPE_FIELD)); - } + } private final String name; private final RoleMapperExpression expression; private final List roles; + private final List roleTemplates ; private final Map metadata; private final boolean enabled; - public ExpressionRoleMapping(String name, RoleMapperExpression expr, List roles, Map metadata, - boolean enabled) { + public ExpressionRoleMapping(String name, RoleMapperExpression expr, List roles, List templates, + Map metadata, boolean enabled) { this.name = name; this.expression = expr; - this.roles = roles; + this.roles = roles == null ? Collections.emptyList() : roles; + this.roleTemplates = templates == null ? Collections.emptyList() : templates; this.metadata = metadata; this.enabled = enabled; } @@ -79,6 +91,11 @@ public ExpressionRoleMapping(StreamInput in) throws IOException { this.name = in.readString(); this.enabled = in.readBoolean(); this.roles = in.readStringList(); + if (in.getVersion().onOrAfter(Version.V_8_0_0)) { + this.roleTemplates = in.readList(TemplateRoleName::new); + } else { + this.roleTemplates = Collections.emptyList(); + } this.expression = ExpressionParser.readExpression(in); this.metadata = in.readMap(); } @@ -88,6 +105,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeBoolean(enabled); out.writeStringCollection(roles); + if (out.getVersion().onOrAfter(Version.V_8_0_0)) { + out.writeList(roleTemplates); + } ExpressionParser.writeExpression(expression, out); out.writeMap(metadata); } @@ -103,7 +123,7 @@ public String getName() { /** * The expression that determines whether the roles in this mapping should be applied to any given user. * If the expression - * {@link RoleMapperExpression#match(org.elasticsearch.xpack.security.authc.support.mapper.expressiondsl.ExpressionModel) matches} a + * {@link RoleMapperExpression#match(ExpressionModel) matches} a * org.elasticsearch.xpack.security.authc.support.UserRoleMapper.UserData user, then the user should be assigned this mapping's * {@link #getRoles() roles} */ @@ -119,6 +139,14 @@ public List getRoles() { return Collections.unmodifiableList(roles); } + /** + * The list of {@link RoleDescriptor roles} (specified by a {@link TemplateRoleName template} that evaluates to one or more names) + * that should be assigned to users that match the {@link #getExpression() expression} in this mapping. + */ + public List getRoleTemplates() { + return Collections.unmodifiableList(roleTemplates); + } + /** * Meta-data for this mapping. 
This exists for external systems of user to track information about this mapping such as where it was * sourced from, when it was loaded, etc. @@ -137,7 +165,30 @@ public boolean isEnabled() { @Override public String toString() { - return getClass().getSimpleName() + "<" + name + " ; " + roles + " = " + Strings.toString(expression) + ">"; + return getClass().getSimpleName() + "<" + name + " ; " + roles + "/" + roleTemplates + " = " + Strings.toString(expression) + ">"; + } + + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final ExpressionRoleMapping that = (ExpressionRoleMapping) o; + return this.enabled == that.enabled && + Objects.equals(this.name, that.name) && + Objects.equals(this.expression, that.expression) && + Objects.equals(this.roles, that.roles) && + Objects.equals(this.roleTemplates, that.roleTemplates) && + Objects.equals(this.metadata, that.metadata); + } + + @Override + public int hashCode() { + return Objects.hash(name, expression, roles, roleTemplates, metadata, enabled); } /** @@ -157,7 +208,7 @@ public static ExpressionRoleMapping parse(String name, BytesReference source, XC */ public static ExpressionRoleMapping parse(String name, XContentParser parser) throws IOException { try { - final Builder builder = PARSER.parse(parser, null); + final Builder builder = PARSER.parse(parser, name); return builder.build(name); } catch (IllegalArgumentException | IllegalStateException e) { throw new ParsingException(parser.getTokenLocation(), e.getMessage(), e); @@ -166,38 +217,55 @@ public static ExpressionRoleMapping parse(String name, XContentParser parser) th /** * Converts this {@link ExpressionRoleMapping} into XContent that is compatible with - * the format handled by {@link #parse(String, XContentParser)}. + * the format handled by {@link #parse(String, BytesReference, XContentType)}. 
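+ * Note that empty {@code roles} / {@code role_templates} arrays are not emitted, and the security-index doc-type field is only included when {@code indexFormat} is set on the three-argument overload.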
*/ @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { return toXContent(builder, params, false); } - public XContentBuilder toXContent(XContentBuilder builder, Params params, boolean includeDocType) throws IOException { + public XContentBuilder toXContent(XContentBuilder builder, Params params, boolean indexFormat) throws IOException { builder.startObject(); builder.field(Fields.ENABLED.getPreferredName(), enabled); - builder.startArray(Fields.ROLES.getPreferredName()); - for (String r : roles) { - builder.value(r); + if (roles.isEmpty() == false) { + builder.startArray(Fields.ROLES.getPreferredName()); + for (String r : roles) { + builder.value(r); + } + builder.endArray(); + } + if (roleTemplates.isEmpty() == false) { + builder.startArray(Fields.ROLE_TEMPLATES.getPreferredName()); + for (TemplateRoleName r : roleTemplates) { + builder.value(r); + } + builder.endArray(); } - builder.endArray(); builder.field(Fields.RULES.getPreferredName()); expression.toXContent(builder, params); builder.field(Fields.METADATA.getPreferredName(), metadata); - if (includeDocType) { + if (indexFormat) { builder.field(NativeRoleMappingStoreField.DOC_TYPE_FIELD, NativeRoleMappingStoreField.DOC_TYPE_ROLE_MAPPING); } return builder.endObject(); } + public Set getRoleNames(ScriptService scriptService, ExpressionModel model) { + return Stream.concat(this.roles.stream(), + this.roleTemplates.stream() + .flatMap(r -> r.getRoleNames(scriptService, model).stream()) + ).collect(Collectors.toSet()); + } + /** * Used to facilitate the use of {@link ObjectParser} (via {@link #PARSER}). */ private static class Builder { private RoleMapperExpression rules; private List roles; + private List roleTemplates; private Map metadata = Collections.emptyMap(); private Boolean enabled; @@ -207,7 +275,12 @@ Builder rules(RoleMapperExpression expression) { } Builder roles(List roles) { - this.roles = roles; + this.roles = new ArrayList<>(roles); + return this; + } + + Builder roleTemplates(List templates) { + this.roleTemplates = new ArrayList<>(templates); return this; } @@ -222,7 +295,7 @@ Builder enabled(boolean enabled) { } private ExpressionRoleMapping build(String name) { - if (roles == null) { + if (roles == null && roleTemplates == null) { throw missingField(name, Fields.ROLES); } if (rules == null) { @@ -231,17 +304,17 @@ private ExpressionRoleMapping build(String name) { if (enabled == null) { throw missingField(name, Fields.ENABLED); } - return new ExpressionRoleMapping(name, rules, roles, metadata, enabled); + return new ExpressionRoleMapping(name, rules, roles, roleTemplates, metadata, enabled); } private IllegalStateException missingField(String id, ParseField field) { return new IllegalStateException("failed to parse role-mapping [" + id + "]. 
missing field [" + field + "]"); } - } public interface Fields { ParseField ROLES = new ParseField("roles"); + ParseField ROLE_TEMPLATES = new ParseField("role_templates"); ParseField ENABLED = new ParseField("enabled"); ParseField RULES = new ParseField("rules"); ParseField METADATA = new ParseField("metadata"); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/TemplateRoleName.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/TemplateRoleName.java new file mode 100644 index 0000000000000..d77882d6454d7 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/TemplateRoleName.java @@ -0,0 +1,211 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.security.authc.support.mapper; + +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParseException; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.ExpressionModel; +import org.elasticsearch.xpack.core.security.support.MustacheTemplateEvaluator; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +/** + * Representation of a Mustache template for expressing one or more roles names in a {@link ExpressionRoleMapping}. 
+ */ +public class TemplateRoleName implements ToXContent, Writeable { + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "role-mapping-template", false, arr -> new TemplateRoleName((BytesReference) arr[0], (Format) arr[1])); + + static { + PARSER.declareField(constructorArg(), TemplateRoleName::extractTemplate, Fields.TEMPLATE, ObjectParser.ValueType.OBJECT_OR_STRING); + PARSER.declareField(optionalConstructorArg(), Format::fromXContent, Fields.FORMAT, ObjectParser.ValueType.STRING); + } + + private final BytesReference template; + private final Format format; + + public TemplateRoleName(BytesReference template, Format format) { + this.template = template; + this.format = format == null ? Format.STRING : format; + } + + public TemplateRoleName(StreamInput in) throws IOException { + this.template = in.readBytesReference(); + this.format = in.readEnum(Format.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBytesReference(template); + out.writeEnum(format); + } + + public BytesReference getTemplate() { + return template; + } + + public Format getFormat() { + return format; + } + + public List getRoleNames(ScriptService scriptService, ExpressionModel model) { + try { + final String evaluation = parseTemplate(scriptService, model.asMap()); + switch (format) { + case STRING: + return Collections.singletonList(evaluation); + case JSON: + return convertJsonToList(evaluation); + default: + throw new IllegalStateException("Unsupported format [" + format + "]"); + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + private List convertJsonToList(String evaluation) throws IOException { + final XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(NamedXContentRegistry.EMPTY, + LoggingDeprecationHandler.INSTANCE, evaluation); + XContentParser.Token token = parser.currentToken(); + if (token == null) { + token = parser.nextToken(); + } + if (token == XContentParser.Token.VALUE_STRING) { + return Collections.singletonList(parser.text()); + } else if (token == XContentParser.Token.START_ARRAY) { + return parser.list().stream() + .filter(Objects::nonNull) + .map(o -> { + if (o instanceof String) { + return (String) o; + } else { + throw new XContentParseException( + "Roles array may only contain strings but found [" + o.getClass().getName() + "] [" + o + "]"); + } + }).collect(Collectors.toList()); + } else { + throw new XContentParseException( + "Roles template must generate a string or an array of strings, but found [" + token + "]"); + } + } + + private String parseTemplate(ScriptService scriptService, Map parameters) throws IOException { + final XContentParser parser = XContentHelper.createParser( + NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, template, XContentType.JSON); + return MustacheTemplateEvaluator.evaluate(scriptService, parser, parameters); + } + + private static BytesReference extractTemplate(XContentParser parser, Void ignore) throws IOException { + if (parser.currentToken() == XContentParser.Token.VALUE_STRING) { + return new BytesArray(parser.text()); + } else { + XContentBuilder builder = JsonXContent.contentBuilder(); + builder.generator().copyCurrentStructure(parser); + return BytesReference.bytes(builder); + } + } + + static TemplateRoleName parse(XContentParser parser) throws IOException { + return PARSER.parse(parser, null); + } + + @Override + public String toString() { + return "template-" + format + "{" + 
template.utf8ToString() + "}"; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return builder.startObject() + .field(Fields.TEMPLATE.getPreferredName(), template.utf8ToString()) + .field(Fields.FORMAT.getPreferredName(), format.formatName()) + .endObject(); + } + + @Override + public boolean isFragment() { + return false; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final TemplateRoleName that = (TemplateRoleName) o; + return Objects.equals(this.template, that.template) && + this.format == that.format; + } + + @Override + public int hashCode() { + return Objects.hash(template, format); + } + + private interface Fields { + ParseField TEMPLATE = new ParseField("template"); + ParseField FORMAT = new ParseField("format"); + } + + public enum Format { + JSON, STRING; + + private static Format fromXContent(XContentParser parser) throws IOException { + final XContentParser.Token token = parser.currentToken(); + if (token != XContentParser.Token.VALUE_STRING) { + throw new XContentParseException(parser.getTokenLocation(), + "Expected [" + XContentParser.Token.VALUE_STRING + "] but found [" + token + "]"); + } + final String text = parser.text(); + try { + return Format.valueOf(text.toUpperCase(Locale.ROOT)); + } catch (IllegalArgumentException e) { + String valueNames = Stream.of(values()).map(Format::formatName).collect(Collectors.joining(",")); + throw new XContentParseException(parser.getTokenLocation(), + "Invalid format [" + text + "] expected one of [" + valueNames + "]"); + } + + } + + public String formatName() { + return name().toLowerCase(Locale.ROOT); + } + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionModel.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionModel.java index 8d43f864878af..d12cc67dcca1b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionModel.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionModel.java @@ -6,9 +6,9 @@ package org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl; import org.elasticsearch.common.Numbers; -import org.elasticsearch.common.collect.Tuple; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -22,10 +22,13 @@ public class ExpressionModel { public static final Predicate NULL_PREDICATE = field -> field.getValue() == null; - private Map>> fields; + + private final Map fieldValues; + private final Map> fieldPredicates; public ExpressionModel() { - this.fields = new HashMap<>(); + this.fieldValues = new HashMap<>(); + this.fieldPredicates = new HashMap<>(); } /** @@ -41,7 +44,8 @@ public ExpressionModel defineField(String name, Object value) { * Defines a field using a supplied predicate. */ public ExpressionModel defineField(String name, Object value, Predicate predicate) { - this.fields.put(name, new Tuple<>(value, predicate)); + this.fieldValues.put(name, value); + this.fieldPredicates.put(name, predicate); return this; } @@ -49,13 +53,7 @@ public ExpressionModel defineField(String name, Object value, Predicateany of the provided values. 
*/ public boolean test(String field, List values) { - final Tuple> tuple = this.fields.get(field); - final Predicate predicate; - if (tuple == null) { - predicate = NULL_PREDICATE; - } else { - predicate = tuple.v2(); - } + final Predicate predicate = this.fieldPredicates.getOrDefault(field, NULL_PREDICATE); return values.stream().anyMatch(predicate); } @@ -103,4 +101,12 @@ private static boolean numberEquals(Number left, Object other) { return Numbers.toLongExact(left) == Numbers.toLongExact(right); } + public Map asMap() { + return Collections.unmodifiableMap(fieldValues); + } + + @Override + public String toString() { + return fieldValues.toString(); + } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/FieldExpression.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/FieldExpression.java index 0e681b110efa4..bea4bbb1cc8fa 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/FieldExpression.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/FieldExpression.java @@ -18,6 +18,7 @@ import java.io.IOException; import java.util.Collections; import java.util.List; +import java.util.Objects; /** * An expression that evaluates to true if a field (map element) matches @@ -151,6 +152,22 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder.value(value); } + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final FieldValue that = (FieldValue) o; + return Objects.equals(this.value, that.value); + } + + @Override + public int hashCode() { + return Objects.hash(value); + } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ApplicationPermission.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ApplicationPermission.java index 0cd4e8a8b0ddc..da6af4ec7cbdc 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ApplicationPermission.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/permission/ApplicationPermission.java @@ -104,15 +104,18 @@ public ResourcePrivilegesMap checkResourcePrivileges(final String applicationNam for (String checkResource : checkForResources) { for (String checkPrivilegeName : checkForPrivilegeNames) { final Set nameSet = Collections.singleton(checkPrivilegeName); - final ApplicationPrivilege checkPrivilege = ApplicationPrivilege.get(applicationName, nameSet, storedPrivileges); - assert checkPrivilege.getApplication().equals(applicationName) : "Privilege " + checkPrivilege + " should have application " - + applicationName; - assert checkPrivilege.name().equals(nameSet) : "Privilege " + checkPrivilege + " should have name " + nameSet; - - if (grants(checkPrivilege, checkResource)) { - resourcePrivilegesMapBuilder.addResourcePrivilege(checkResource, checkPrivilegeName, Boolean.TRUE); - } else { - resourcePrivilegesMapBuilder.addResourcePrivilege(checkResource, checkPrivilegeName, Boolean.FALSE); + final Set checkPrivileges = ApplicationPrivilege.get(applicationName, nameSet, storedPrivileges); + logger.trace("Resolved privileges [{}] for [{},{}]", checkPrivileges, applicationName, nameSet); + 
for (ApplicationPrivilege checkPrivilege : checkPrivileges) { + assert Automatons.predicate(applicationName).test(checkPrivilege.getApplication()) : "Privilege " + checkPrivilege + + " should have application " + applicationName; + assert checkPrivilege.name().equals(nameSet) : "Privilege " + checkPrivilege + " should have name " + nameSet; + + if (grants(checkPrivilege, checkResource)) { + resourcePrivilegesMapBuilder.addResourcePrivilege(checkResource, checkPrivilegeName, Boolean.TRUE); + } else { + resourcePrivilegesMapBuilder.addResourcePrivilege(checkResource, checkPrivilegeName, Boolean.FALSE); + } } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ApplicationPrivilege.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ApplicationPrivilege.java index 13db17a63bb0d..c4460b000e6d8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ApplicationPrivilege.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ApplicationPrivilege.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.core.security.authz.privilege; import org.elasticsearch.common.Strings; +import org.elasticsearch.xpack.core.security.support.Automatons; import java.util.Arrays; import java.util.Collection; @@ -15,6 +16,7 @@ import java.util.Objects; import java.util.Set; import java.util.function.Function; +import java.util.function.Predicate; import java.util.regex.Pattern; import java.util.stream.Collectors; @@ -101,7 +103,7 @@ private static void validateApplicationName(String application, boolean allowWil if (allowWildcard == false) { throw new IllegalArgumentException("Application names may not contain '*' (found '" + application + "')"); } - if(application.equals("*")) { + if (application.equals("*")) { // this is allowed and short-circuiting here makes the later validation simpler return; } @@ -128,7 +130,10 @@ private static void validateApplicationName(String application, boolean allowWil } if (parts.length > 1) { - final String suffix = parts[1]; + String suffix = parts[1]; + if (allowWildcard && suffix.endsWith("*")) { + suffix = suffix.substring(0, suffix.length() - 1); + } if (Strings.validFileName(suffix) == false) { throw new IllegalArgumentException("An application name suffix may not contain any of the characters '" + Strings.collectionToDelimitedString(Strings.INVALID_FILENAME_CHARS, "") + "' (found '" + suffix + "')"); @@ -165,20 +170,38 @@ public static void validatePrivilegeOrActionName(String name) { } /** - * Finds or creates an application privileges with the provided names. + * Finds or creates a collection of application privileges with the provided names. + * If application is a wildcard, it will be expanded to all matching application names in {@code stored} * Each element in {@code name} may be the name of a stored privilege (to be resolved from {@code stored}, or a bespoke action pattern. 
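+ * For example, {@code get("app-*", names, stored)} returns one privilege per stored application whose name matches {@code app-*}; if no stored application matches, a single unresolved privilege is returned for the wildcard name itself.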
*/ - public static ApplicationPrivilege get(String application, Set name, Collection stored) { + public static Set get(String application, Set name, Collection stored) { if (name.isEmpty()) { - return NONE.apply(application); + return Collections.singleton(NONE.apply(application)); + } else if (application.contains("*")) { + Predicate predicate = Automatons.predicate(application); + final Set result = stored.stream() + .map(ApplicationPrivilegeDescriptor::getApplication) + .filter(predicate) + .distinct() + .map(appName -> resolve(appName, name, stored)) + .collect(Collectors.toSet()); + if (result.isEmpty()) { + return Collections.singleton(resolve(application, name, Collections.emptyMap())); + } else { + return result; + } } else { - Map lookup = stored.stream() - .filter(apd -> apd.getApplication().equals(application)) - .collect(Collectors.toMap(ApplicationPrivilegeDescriptor::getName, Function.identity())); - return resolve(application, name, lookup); + return Collections.singleton(resolve(application, name, stored)); } } + private static ApplicationPrivilege resolve(String application, Set name, Collection stored) { + final Map lookup = stored.stream() + .filter(apd -> apd.getApplication().equals(application)) + .collect(Collectors.toMap(ApplicationPrivilegeDescriptor::getName, Function.identity())); + return resolve(application, name, lookup); + } + private static ApplicationPrivilege resolve(String application, Set names, Map lookup) { final int size = names.size(); if (size == 0) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ApplicationPrivilegeDescriptor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ApplicationPrivilegeDescriptor.java index 85d6aad3e3560..613f64f93b54a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ApplicationPrivilegeDescriptor.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ApplicationPrivilegeDescriptor.java @@ -23,6 +23,8 @@ import java.util.Objects; import java.util.Set; +import static org.elasticsearch.common.Strings.collectionToCommaDelimitedString; + /** * An {@code ApplicationPrivilegeDescriptor} is a representation of a stored {@link ApplicationPrivilege}. * A user (via a role) can be granted an application privilege by name (e.g. ("myapp", "read"). @@ -104,6 +106,11 @@ public XContentBuilder toXContent(XContentBuilder builder, boolean includeTypeFi return builder.endObject(); } + @Override + public String toString() { + return getClass().getSimpleName() + "{[" + application + "],[" + name + "],[" + collectionToCommaDelimitedString(actions) + "]}"; + } + /** * Construct a new {@link ApplicationPrivilegeDescriptor} from XContent. 
* diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/support/SecurityQueryTemplateEvaluator.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/support/SecurityQueryTemplateEvaluator.java index 951c4acf10d0d..73a1d7fcde509 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/support/SecurityQueryTemplateEvaluator.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/support/SecurityQueryTemplateEvaluator.java @@ -11,10 +11,8 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; -import org.elasticsearch.script.ScriptType; -import org.elasticsearch.script.TemplateScript; +import org.elasticsearch.xpack.core.security.support.MustacheTemplateEvaluator; import org.elasticsearch.xpack.core.security.user.User; import java.io.IOException; @@ -66,27 +64,19 @@ public static String evaluateTemplate(final String querySource, final ScriptServ if (token != XContentParser.Token.START_OBJECT) { throw new ElasticsearchParseException("Unexpected token [" + token + "]"); } - Script script = Script.parse(parser); - // Add the user details to the params - Map params = new HashMap<>(); - if (script.getParams() != null) { - params.putAll(script.getParams()); - } Map userModel = new HashMap<>(); userModel.put("username", user.principal()); userModel.put("full_name", user.fullName()); userModel.put("email", user.email()); userModel.put("roles", Arrays.asList(user.roles())); userModel.put("metadata", Collections.unmodifiableMap(user.metadata())); - params.put("_user", userModel); - // Always enforce mustache script lang: - script = new Script(script.getType(), script.getType() == ScriptType.STORED ? null : "mustache", script.getIdOrCode(), - script.getOptions(), params); - TemplateScript compiledTemplate = scriptService.compile(script, TemplateScript.CONTEXT).newInstance(script.getParams()); - return compiledTemplate.execute(); + Map extraParams = Collections.singletonMap("_user", userModel); + + return MustacheTemplateEvaluator.evaluate(scriptService, parser, extraParams); } else { return querySource; } } } + } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/MustacheTemplateEvaluator.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/MustacheTemplateEvaluator.java new file mode 100644 index 0000000000000..02f730333de3a --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/MustacheTemplateEvaluator.java @@ -0,0 +1,42 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.security.support; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.script.TemplateScript; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +/** + * Utility class for evaluating Mustache templates at runtime. 
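+ * The parser passed to {@link #evaluate} must be positioned on a script definition (e.g. {@code {"source": "..."}}); the script language is always forced to {@code mustache} and any extra parameters are merged into the script's own params before compilation.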
+ */ +public final class MustacheTemplateEvaluator { + + private MustacheTemplateEvaluator() { + throw new UnsupportedOperationException("Cannot construct " + MustacheTemplateEvaluator.class); + } + + public static String evaluate(ScriptService scriptService, XContentParser parser, Map extraParams) throws IOException { + Script script = Script.parse(parser); + // Add the user details to the params + Map params = new HashMap<>(); + if (script.getParams() != null) { + params.putAll(script.getParams()); + } + extraParams.forEach(params::put); + // Always enforce mustache script lang: + script = new Script(script.getType(), script.getType() == ScriptType.STORED ? null : "mustache", script.getIdOrCode(), + script.getOptions(), params); + TemplateScript compiledTemplate = scriptService.compile(script, TemplateScript.CONTEXT).newInstance(script.getParams()); + return compiledTemplate.execute(); + } +} diff --git a/x-pack/plugin/core/src/main/resources/security-index-template.json b/x-pack/plugin/core/src/main/resources/security-index-template.json index 94bb2b03ee049..f4e3cd6db020d 100644 --- a/x-pack/plugin/core/src/main/resources/security-index-template.json +++ b/x-pack/plugin/core/src/main/resources/security-index-template.json @@ -45,6 +45,16 @@ "roles" : { "type" : "keyword" }, + "role_templates" : { + "properties": { + "template" : { + "type": "text" + }, + "format" : { + "type": "keyword" + } + } + }, "password" : { "type" : "keyword", "index" : false, diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskActionRequestTests.java new file mode 100644 index 0000000000000..8d3d8e3ac7890 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskActionRequestTests.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.dataframe.action; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +public class StartDataFrameTransformTaskActionRequestTests extends + AbstractWireSerializingTestCase { + @Override + protected StartDataFrameTransformTaskAction.Request createTestInstance() { + return new StartDataFrameTransformTaskAction.Request(randomAlphaOfLength(4)); + } + + @Override + protected Writeable.Reader instanceReader() { + return StartDataFrameTransformTaskAction.Request::new; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskActionResponseTests.java new file mode 100644 index 0000000000000..62165f87968e0 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/dataframe/action/StartDataFrameTransformTaskActionResponseTests.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.dataframe.action; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.test.AbstractWireSerializingTestCase; + +public class StartDataFrameTransformTaskActionResponseTests extends + AbstractWireSerializingTestCase { + @Override + protected StartDataFrameTransformTaskAction.Response createTestInstance() { + return new StartDataFrameTransformTaskAction.Response(randomBoolean()); + } + + @Override + protected Writeable.Reader instanceReader() { + return StartDataFrameTransformTaskAction.Response::new; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForNoFollowersStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForNoFollowersStepTests.java index f1f3c053e2345..6953455489d1a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForNoFollowersStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexlifecycle/WaitForNoFollowersStepTests.java @@ -132,6 +132,42 @@ public void onFailure(Exception e) { containsString("this index is a leader index; waiting for all following indices to cease following before proceeding")); } + public void testNoShardStats() { + WaitForNoFollowersStep step = createRandomInstance(); + + String indexName = randomAlphaOfLengthBetween(5,10); + + int numberOfShards = randomIntBetween(1, 100); + final IndexMetaData indexMetaData = IndexMetaData.builder(indexName) + .settings(settings(Version.CURRENT)) + .numberOfShards(numberOfShards) + .numberOfReplicas(randomIntBetween(1, 10)) + .build(); + + ShardStats sStats = new ShardStats(null, mockShardPath(), null, null, null, null); + ShardStats[] shardStats = new ShardStats[1]; + shardStats[0] = sStats; + mockIndexStatsCall(step.getClient(), indexName, new IndexStats(indexName, "uuid", shardStats)); + + final SetOnce conditionMetHolder = new SetOnce<>(); + final SetOnce stepInfoHolder = new SetOnce<>(); + step.evaluateCondition(indexMetaData, new AsyncWaitStep.Listener() { + @Override + public void onResponse(boolean conditionMet, ToXContentObject infomationContext) { + conditionMetHolder.set(conditionMet); + stepInfoHolder.set(infomationContext); + } + + @Override + public void onFailure(Exception e) { + fail("onFailure should not be called in this test, called with exception: " + e.getMessage()); + } + }); + + assertTrue(conditionMetHolder.get()); + assertNull(stepInfoHolder.get()); + } + public void testFailure() { WaitForNoFollowersStep step = createRandomInstance(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/support/mapper/TemplateRoleNameTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/support/mapper/TemplateRoleNameTests.java new file mode 100644 index 0000000000000..cab10ca728323 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/support/mapper/TemplateRoleNameTests.java @@ -0,0 +1,119 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.security.authc.support.mapper; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.ByteBufferStreamInput; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.DeprecationHandler; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.script.ScriptModule; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.mustache.MustacheScriptEngine; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; +import org.elasticsearch.xpack.core.security.authc.support.mapper.TemplateRoleName.Format; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.ExpressionModel; +import org.hamcrest.Matchers; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; + +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; + +public class TemplateRoleNameTests extends ESTestCase { + + public void testParseRoles() throws Exception { + final TemplateRoleName role1 = parse("{ \"template\": { \"source\": \"_user_{{username}}\" } }"); + assertThat(role1, Matchers.instanceOf(TemplateRoleName.class)); + assertThat(role1.getTemplate().utf8ToString(), equalTo("{\"source\":\"_user_{{username}}\"}")); + assertThat(role1.getFormat(), equalTo(Format.STRING)); + + final TemplateRoleName role2 = parse( + "{ \"template\": \"{\\\"source\\\":\\\"{{#tojson}}groups{{/tojson}}\\\"}\", \"format\":\"json\" }"); + assertThat(role2, Matchers.instanceOf(TemplateRoleName.class)); + assertThat(role2.getTemplate().utf8ToString(), + equalTo("{\"source\":\"{{#tojson}}groups{{/tojson}}\"}")); + assertThat(role2.getFormat(), equalTo(Format.JSON)); + } + + public void testToXContent() throws Exception { + final String json = "{" + + "\"template\":\"{\\\"source\\\":\\\"" + randomAlphaOfLengthBetween(8, 24) + "\\\"}\"," + + "\"format\":\"" + randomFrom(Format.values()).formatName() + "\"" + + "}"; + assertThat(Strings.toString(parse(json)), equalTo(json)); + } + + public void testSerializeTemplate() throws Exception { + trySerialize(new TemplateRoleName(new BytesArray(randomAlphaOfLengthBetween(12, 60)), randomFrom(Format.values()))); + } + + public void testEqualsAndHashCode() throws Exception { + tryEquals(new TemplateRoleName(new BytesArray(randomAlphaOfLengthBetween(12, 60)), randomFrom(Format.values()))); + } + + public void testEvaluateRoles() throws Exception { + final ScriptService scriptService = new ScriptService(Settings.EMPTY, + Collections.singletonMap(MustacheScriptEngine.NAME, new MustacheScriptEngine()), ScriptModule.CORE_CONTEXTS); + final ExpressionModel model = new ExpressionModel(); + model.defineField("username", "hulk"); + model.defineField("groups", Arrays.asList("avengers", "defenders", "panthenon")); + + final TemplateRoleName plainString = new TemplateRoleName(new BytesArray("{ \"source\":\"heroes\" }"), Format.STRING); + assertThat(plainString.getRoleNames(scriptService, model), contains("heroes")); + + final TemplateRoleName 
user = new TemplateRoleName(new BytesArray("{ \"source\":\"_user_{{username}}\" }"), Format.STRING); + assertThat(user.getRoleNames(scriptService, model), contains("_user_hulk")); + + final TemplateRoleName groups = new TemplateRoleName(new BytesArray("{ \"source\":\"{{#tojson}}groups{{/tojson}}\" }"), + Format.JSON); + assertThat(groups.getRoleNames(scriptService, model), contains("avengers", "defenders", "panthenon")); + } + + private TemplateRoleName parse(String json) throws IOException { + final XContentParser parser = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json); + final TemplateRoleName role = TemplateRoleName.parse(parser); + assertThat(role, notNullValue()); + return role; + } + + public void trySerialize(TemplateRoleName original) throws Exception { + BytesStreamOutput output = new BytesStreamOutput(); + original.writeTo(output); + + final StreamInput rawInput = ByteBufferStreamInput.wrap(BytesReference.toBytes(output.bytes())); + final TemplateRoleName serialized = new TemplateRoleName(rawInput); + assertEquals(original, serialized); + } + + public void tryEquals(TemplateRoleName original) { + final EqualsHashCodeTestUtils.CopyFunction copy = + rmt -> new TemplateRoleName(rmt.getTemplate(), rmt.getFormat()); + final EqualsHashCodeTestUtils.MutateFunction mutate = rmt -> { + if (randomBoolean()) { + return new TemplateRoleName(rmt.getTemplate(), + randomValueOtherThan(rmt.getFormat(), () -> randomFrom(Format.values()))); + } else { + final String templateStr = rmt.getTemplate().utf8ToString(); + return new TemplateRoleName(new BytesArray(templateStr.substring(randomIntBetween(1, templateStr.length() / 2))), + rmt.getFormat()); + } + }; + EqualsHashCodeTestUtils.checkEqualsAndHashCode(original, copy, mutate); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/ApplicationPermissionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/ApplicationPermissionTests.java index 992ca8db1b083..0f81b872b86d4 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/ApplicationPermissionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/permission/ApplicationPermissionTests.java @@ -13,15 +13,16 @@ import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Set; import java.util.stream.Collectors; import java.util.stream.Stream; -import static java.util.Collections.singletonList; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; public class ApplicationPermissionTests extends ESTestCase { @@ -34,6 +35,7 @@ public class ApplicationPermissionTests extends ESTestCase { private ApplicationPrivilege app1Delete = storePrivilege("app1", "delete", "write/delete"); private ApplicationPrivilege app1Create = storePrivilege("app1", "create", "write/create"); private ApplicationPrivilege app2Read = storePrivilege("app2", "read", "read/*"); + private ApplicationPrivilege otherAppRead = storePrivilege("other-app", "read", "read/*"); private ApplicationPrivilege storePrivilege(String app, String name, String... 
patterns) { store.add(new ApplicationPrivilegeDescriptor(app, name, Sets.newHashSet(patterns), Collections.emptyMap())); @@ -104,6 +106,16 @@ public void testDoesNotMatchAcrossApplications() { assertThat(buildPermission(app1All, "*").grants(app2Read, "123"), equalTo(false)); } + public void testMatchingWithWildcardApplicationNames() { + final Set readAllApp = ApplicationPrivilege.get("app*", Collections.singleton("read"), store); + assertThat(buildPermission(readAllApp, "*").grants(app1Read, "123"), equalTo(true)); + assertThat(buildPermission(readAllApp, "foo/*").grants(app2Read, "foo/bar"), equalTo(true)); + + assertThat(buildPermission(readAllApp, "*").grants(app1Write, "123"), equalTo(false)); + assertThat(buildPermission(readAllApp, "foo/*").grants(app2Read, "bar/baz"), equalTo(false)); + assertThat(buildPermission(readAllApp, "*").grants(otherAppRead, "abc"), equalTo(false)); + } + public void testMergedPermissionChecking() { final ApplicationPrivilege app1ReadWrite = compositePrivilege("app1", app1Read, app1Write); final ApplicationPermission hasPermission = buildPermission(app1ReadWrite, "allow/*"); @@ -138,16 +150,27 @@ public void testInspectPermissionContents() { } private ApplicationPrivilege actionPrivilege(String appName, String... actions) { - return ApplicationPrivilege.get(appName, Sets.newHashSet(actions), Collections.emptyList()); + final Set privileges = ApplicationPrivilege.get(appName, Sets.newHashSet(actions), Collections.emptyList()); + assertThat(privileges, hasSize(1)); + return privileges.iterator().next(); } private ApplicationPrivilege compositePrivilege(String application, ApplicationPrivilege... children) { Set names = Stream.of(children).map(ApplicationPrivilege::name).flatMap(Set::stream).collect(Collectors.toSet()); - return ApplicationPrivilege.get(application, names, store); + final Set privileges = ApplicationPrivilege.get(application, names, store); + assertThat(privileges, hasSize(1)); + return privileges.iterator().next(); } - private ApplicationPermission buildPermission(ApplicationPrivilege privilege, String... resources) { - return new ApplicationPermission(singletonList(new Tuple<>(privilege, Sets.newHashSet(resources)))); + return buildPermission(Collections.singleton(privilege), resources); + } + + private ApplicationPermission buildPermission(Collection privileges, String... 
resources) { + final Set resourceSet = Sets.newHashSet(resources); + final List>> privilegesAndResources = privileges.stream() + .map(p -> new Tuple<>(p, resourceSet)) + .collect(Collectors.toList()); + return new ApplicationPermission(privilegesAndResources); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ApplicationPrivilegeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ApplicationPrivilegeTests.java index c65f06f05f957..cd917ed81f16e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ApplicationPrivilegeTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/privilege/ApplicationPrivilegeTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.EqualsHashCodeTestUtils; +import org.hamcrest.Matchers; import org.junit.Assert; import java.util.Arrays; @@ -22,9 +23,11 @@ import static org.elasticsearch.common.Strings.collectionToCommaDelimitedString; import static org.hamcrest.Matchers.arrayContainingInAnyOrder; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.iterableWithSize; public class ApplicationPrivilegeTests extends ESTestCase { @@ -59,6 +62,12 @@ public void testValidationOfApplicationName() { assertNoException(app, () -> ApplicationPrivilege.validateApplicationName(app)); assertNoException(app, () -> ApplicationPrivilege.validateApplicationNameOrWildcard(app)); } + + // wildcards in the suffix + for (String app : Arrays.asList("app1-*", "app1-foo*", "app1-.*", "app1-.foo.*", appNameWithSpecialChars + "*")) { + assertValidationFailure(app, "application name", () -> ApplicationPrivilege.validateApplicationName(app)); + assertNoException(app, () -> ApplicationPrivilege.validateApplicationNameOrWildcard(app)); + } } public void testValidationOfPrivilegeName() { @@ -101,16 +110,23 @@ public void testNonePrivilege() { } public void testGetPrivilegeByName() { - final ApplicationPrivilegeDescriptor descriptor = descriptor("my-app", "read", "data:read/*", "action:login"); + final ApplicationPrivilegeDescriptor myRead = descriptor("my-app", "read", "data:read/*", "action:login"); final ApplicationPrivilegeDescriptor myWrite = descriptor("my-app", "write", "data:write/*", "action:login"); final ApplicationPrivilegeDescriptor myAdmin = descriptor("my-app", "admin", "data:read/*", "action:*"); final ApplicationPrivilegeDescriptor yourRead = descriptor("your-app", "read", "data:read/*", "action:login"); - final Set stored = Sets.newHashSet(descriptor, myWrite, myAdmin, yourRead); + final Set stored = Sets.newHashSet(myRead, myWrite, myAdmin, yourRead); + + final Set myAppRead = ApplicationPrivilege.get("my-app", Collections.singleton("read"), stored); + assertThat(myAppRead, iterableWithSize(1)); + assertPrivilegeEquals(myAppRead.iterator().next(), myRead); - assertEqual(ApplicationPrivilege.get("my-app", Collections.singleton("read"), stored), descriptor); - assertEqual(ApplicationPrivilege.get("my-app", Collections.singleton("write"), stored), myWrite); + final Set myAppWrite = ApplicationPrivilege.get("my-app", Collections.singleton("write"), stored); + assertThat(myAppWrite, iterableWithSize(1)); + 
assertPrivilegeEquals(myAppWrite.iterator().next(), myWrite); - final ApplicationPrivilege readWrite = ApplicationPrivilege.get("my-app", Sets.newHashSet("read", "write"), stored); + final Set myReadWrite = ApplicationPrivilege.get("my-app", Sets.newHashSet("read", "write"), stored); + assertThat(myReadWrite, Matchers.hasSize(1)); + final ApplicationPrivilege readWrite = myReadWrite.iterator().next(); assertThat(readWrite.getApplication(), equalTo("my-app")); assertThat(readWrite.name(), containsInAnyOrder("read", "write")); assertThat(readWrite.getPatterns(), arrayContainingInAnyOrder("data:read/*", "data:write/*", "action:login")); @@ -124,10 +140,10 @@ public void testGetPrivilegeByName() { } } - private void assertEqual(ApplicationPrivilege myReadPriv, ApplicationPrivilegeDescriptor myRead) { - assertThat(myReadPriv.getApplication(), equalTo(myRead.getApplication())); - assertThat(getPrivilegeName(myReadPriv), equalTo(myRead.getName())); - assertThat(Sets.newHashSet(myReadPriv.getPatterns()), equalTo(myRead.getActions())); + private void assertPrivilegeEquals(ApplicationPrivilege privilege, ApplicationPrivilegeDescriptor descriptor) { + assertThat(privilege.getApplication(), equalTo(descriptor.getApplication())); + assertThat(privilege.name(), contains(descriptor.getName())); + assertThat(Sets.newHashSet(privilege.getPatterns()), equalTo(descriptor.getActions())); } private ApplicationPrivilegeDescriptor descriptor(String application, String name, String... actions) { diff --git a/x-pack/plugin/data-frame/build.gradle b/x-pack/plugin/data-frame/build.gradle index bff8118bfc425..ad4d846fd160f 100644 --- a/x-pack/plugin/data-frame/build.gradle +++ b/x-pack/plugin/data-frame/build.gradle @@ -8,9 +8,6 @@ esplugin { extendedPlugins = ['x-pack-core'] } -compileJava.options.compilerArgs << "-Xlint:-rawtypes" -compileTestJava.options.compilerArgs << "-Xlint:-rawtypes" - dependencies { compileOnly "org.elasticsearch:elasticsearch:${version}" diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java index 750faf8dade51..2367e255cd9ba 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameAuditorIT.java @@ -49,6 +49,7 @@ public void createIndexes() throws IOException { setupUser(TEST_USER_NAME, Arrays.asList("data_frame_transforms_admin", DATA_ACCESS_ROLE)); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/40594") @SuppressWarnings("unchecked") public void testAuditorWritesAudits() throws Exception { String transformId = "simplePivotForAudit"; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java index 34d1a388ff607..99c08f1a50583 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFramePivotRestIT.java @@ -268,6 +268,7 @@ public 
void testPreviewTransform() throws Exception { }); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/40537") public void testPivotWithMaxOnDateField() throws Exception { String transformId = "simpleDateHistogramPivotWithMaxTime"; String dataFrameIndex = "pivot_reviews_via_date_histogram_with_max_time"; diff --git a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java index 887279ef20fc8..de0757a30ba1b 100644 --- a/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java +++ b/x-pack/plugin/data-frame/qa/single-node-tests/src/test/java/org/elasticsearch/xpack/dataframe/integration/DataFrameTaskFailedStateIT.java @@ -21,6 +21,11 @@ public class DataFrameTaskFailedStateIT extends DataFrameRestTestCase { + public void testDummy() { + // remove once the awaits fix below is resolved + } + + @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/40543") public void testFailureStateInteraction() throws Exception { createReviewsIndex(); String transformId = "failure_pivot_1"; diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportDeleteDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportDeleteDataFrameTransformAction.java index 131ad690d2b66..2cdc4009e785b 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportDeleteDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportDeleteDataFrameTransformAction.java @@ -12,20 +12,18 @@ import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.tasks.TransportTasksAction; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.discovery.MasterNotDiscoveredException; import org.elasticsearch.persistent.PersistentTasksCustomMetaData; -import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.dataframe.action.DeleteDataFrameTransformAction; -import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.core.dataframe.action.DeleteDataFrameTransformAction.Request; import org.elasticsearch.xpack.core.dataframe.action.DeleteDataFrameTransformAction.Response; +import org.elasticsearch.xpack.core.indexing.IndexerState; import org.elasticsearch.xpack.dataframe.persistence.DataFrameTransformsConfigManager; import org.elasticsearch.xpack.dataframe.transforms.DataFrameTransformTask; @@ -36,8 +34,7 @@ public class TransportDeleteDataFrameTransformAction extends TransportTasksActio private final DataFrameTransformsConfigManager transformsConfigManager; @Inject - public TransportDeleteDataFrameTransformAction(TransportService transportService, ThreadPool threadPool, ActionFilters actionFilters, - IndexNameExpressionResolver 
indexNameExpressionResolver, PersistentTasksService persistentTasksService, + public TransportDeleteDataFrameTransformAction(TransportService transportService, ActionFilters actionFilters, ClusterService clusterService, DataFrameTransformsConfigManager transformsConfigManager) { super(DeleteDataFrameTransformAction.NAME, clusterService, transportService, actionFilters, Request::new, Response::new, Response::new, ThreadPool.Names.SAME); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java index 13b633359f5cb..78f6823034811 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java @@ -82,7 +82,13 @@ private void getPreview(Pivot pivot, ActionListener>> l r -> { final CompositeAggregation agg = r.getAggregations().get(COMPOSITE_AGGREGATION_NAME); DataFrameIndexerTransformStats stats = new DataFrameIndexerTransformStats(); - listener.onResponse(pivot.extractResults(agg, deducedMappings, stats).collect(Collectors.toList())); + // remove all internal fields + List> results = pivot.extractResults(agg, deducedMappings, stats) + .map(record -> { + record.keySet().removeIf(k -> k.startsWith("_")); + return record; + }).collect(Collectors.toList()); + listener.onResponse(results); }, listener::onFailure )); diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java index a6f52f22da407..f68e246ed860b 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportStartDataFrameTransformAction.java @@ -55,8 +55,8 @@ public TransportStartDataFrameTransformAction(TransportService transportService, ThreadPool threadPool, IndexNameExpressionResolver indexNameExpressionResolver, DataFrameTransformsConfigManager dataFrameTransformsConfigManager, PersistentTasksService persistentTasksService, Client client) { - super(StartDataFrameTransformAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, - StartDataFrameTransformAction.Request::new); + super(StartDataFrameTransformAction.NAME, transportService, clusterService, threadPool, actionFilters, + StartDataFrameTransformAction.Request::new, indexNameExpressionResolver); this.licenseState = licenseState; this.dataFrameTransformsConfigManager = dataFrameTransformsConfigManager; this.persistentTasksService = persistentTasksService; diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java index a9fcb77d50a11..bb07722ddeed0 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/DataFrameIndexer.java @@ -14,6 +14,7 @@ 
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.indexing.AsyncTwoPhaseIndexer; @@ -73,15 +74,28 @@ private Stream processBucketsToIndexRequests(CompositeAggregation String indexName = transformConfig.getDestination().getIndex(); return pivot.extractResults(agg, getFieldMappings(), getStats()).map(document -> { + String id = (String) document.get(DataFrameField.DOCUMENT_ID_FIELD); + + if (id == null) { + throw new RuntimeException("Expected a document id but got null."); + } + XContentBuilder builder; try { builder = jsonBuilder(); - builder.map(document); + builder.startObject(); + for (Map.Entry value : document.entrySet()) { + // skip all internal fields + if (value.getKey().startsWith("_") == false) { + builder.field(value.getKey(), value.getValue()); + } + } + builder.endObject(); } catch (IOException e) { throw new UncheckedIOException(e); } - IndexRequest request = new IndexRequest(indexName).source(builder); + IndexRequest request = new IndexRequest(indexName).source(builder).id(id); return request; }); } diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/IDGenerator.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/IDGenerator.java new file mode 100644 index 0000000000000..d9223fe90dd4f --- /dev/null +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/IDGenerator.java @@ -0,0 +1,96 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.dataframe.transforms; + +import org.apache.lucene.util.BytesRefBuilder; +import org.elasticsearch.common.Numbers; +import org.elasticsearch.common.hash.MurmurHash3; + +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import java.util.TreeMap; + +/** + * ID Generator for creating unique but deterministic document ids. 
+ * + * uses MurmurHash with 128 bits + */ +public final class IDGenerator { + private static final byte[] NULL_VALUE = "__NULL_VALUE__".getBytes(StandardCharsets.UTF_8); + private static final byte DELIM = '$'; + private static final long SEED = 19; + private static final int MAX_FIRST_BYTES = 5; + + private final TreeMap objectsForIDGeneration = new TreeMap<>(); + + public IDGenerator() { + } + + /** + * Add a value to the generator + * @param key object identifier, to be used for consistent sorting + * @param value the value + */ + public void add(String key, Object value) { + if (objectsForIDGeneration.containsKey(key)) { + throw new IllegalArgumentException("Keys must be unique"); + } + objectsForIDGeneration.put(key, value); + } + + /** + * Create a document id based on the input objects + * + * @return a document id as string + */ + public String getID() { + if (objectsForIDGeneration.size() == 0) { + throw new RuntimeException("Add at least 1 object before generating the ID"); + } + + BytesRefBuilder buffer = new BytesRefBuilder(); + BytesRefBuilder hashedBytes = new BytesRefBuilder(); + + for (Object value : objectsForIDGeneration.values()) { + byte[] v = getBytes(value); + + buffer.append(v, 0, v.length); + buffer.append(DELIM); + + // keep the 1st byte of every object + if (hashedBytes.length() <= MAX_FIRST_BYTES) { + hashedBytes.append(v[0]); + } + } + MurmurHash3.Hash128 hasher = MurmurHash3.hash128(buffer.bytes(), 0, buffer.length(), SEED, new MurmurHash3.Hash128()); + hashedBytes.append(Numbers.longToBytes(hasher.h1), 0, 8); + hashedBytes.append(Numbers.longToBytes(hasher.h2), 0, 8); + return Base64.getUrlEncoder().withoutPadding().encodeToString(hashedBytes.bytes()); + } + + /** + * Turns objects into byte arrays, only supporting types returned groupBy + * + * @param value the value as object + * @return a byte representation of the input object + */ + private static byte[] getBytes(Object value) { + if (value == null) { + return NULL_VALUE; + } else if (value instanceof String) { + return ((String) value).getBytes(StandardCharsets.UTF_8); + } else if (value instanceof Long) { + return Numbers.longToBytes((Long) value); + } else if (value instanceof Double) { + return Numbers.doubleToBytes((Double) value); + } else if (value instanceof Integer) { + return Numbers.intToBytes((Integer) value); + } + + throw new IllegalArgumentException("Value of type [" + value.getClass() + "] is not supported"); + } +} diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java index fa7536497c4f0..5d77f82e610ab 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtils.java @@ -13,8 +13,10 @@ import org.elasticsearch.search.aggregations.bucket.composite.CompositeAggregation; import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation.SingleValue; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.pivot.GroupConfig; +import 
org.elasticsearch.xpack.dataframe.transforms.IDGenerator; import java.util.Collection; import java.util.HashMap; @@ -43,10 +45,17 @@ public static Stream> extractCompositeAggregationResults(Com DataFrameIndexerTransformStats stats) { return agg.getBuckets().stream().map(bucket -> { stats.incrementNumDocuments(bucket.getDocCount()); - Map document = new HashMap<>(); - groups.getGroups().keySet().forEach(destinationFieldName -> - document.put(destinationFieldName, bucket.getKey().get(destinationFieldName))); + // generator to create unique but deterministic document ids, so we + // - do not create duplicates if we re-run after failure + // - update documents + IDGenerator idGen = new IDGenerator(); + + groups.getGroups().keySet().forEach(destinationFieldName -> { + Object value = bucket.getKey().get(destinationFieldName); + idGen.add(destinationFieldName, value); + document.put(destinationFieldName, value); + }); for (AggregationBuilder aggregationBuilder : aggregationBuilders) { String aggName = aggregationBuilder.getName(); @@ -71,6 +80,9 @@ public static Stream> extractCompositeAggregationResults(Com assert false; } } + + document.put(DataFrameField.DOCUMENT_ID_FIELD, idGen.getID()); + return document; }); } diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointServiceTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointServiceTests.java index ea5362d184b34..0868315165cdc 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointServiceTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/checkpoint/DataFrameTransformsCheckpointServiceTests.java @@ -82,6 +82,7 @@ public void testExtractIndexCheckpointsLostPrimaries() { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/40368") public void testExtractIndexCheckpointsInconsistentGlobalCheckpoints() { Map expectedCheckpoints = new HashMap<>(); Set indices = randomUserIndices(); diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/IDGeneratorTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/IDGeneratorTests.java new file mode 100644 index 0000000000000..fd378a2c4c171 --- /dev/null +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/IDGeneratorTests.java @@ -0,0 +1,63 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
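
The new IDGenerator above gives each pivot bucket a stable document id derived from its group-by values, so re-running a transform after a failure updates the existing destination documents instead of duplicating them; AggregationResultUtils feeds every group key/value pair into the generator and stores the result under the internal document-id field. A rough, self-contained sketch of that contract (keys kept sorted, a marker for null values, URL-safe base64 output) is below; it substitutes SHA-256 for the MurmurHash3 128-bit hashing the real class uses, purely for illustration:

```java
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Base64;
import java.util.Map;
import java.util.TreeMap;

final class DeterministicIdSketch {

    private static final String NULL_MARKER = "__NULL_VALUE__";

    // Same contract as IDGenerator: identical group-by values always yield the same id,
    // regardless of the order in which the key/value pairs were added.
    static String idFor(Map<String, Object> groupKeyValues) {
        if (groupKeyValues.isEmpty()) {
            throw new IllegalArgumentException("at least one key/value pair is required");
        }
        TreeMap<String, Object> sorted = new TreeMap<>(groupKeyValues); // sort by key
        StringBuilder buffer = new StringBuilder();
        for (Map.Entry<String, Object> entry : sorted.entrySet()) {
            Object value = entry.getValue();
            buffer.append(value == null ? NULL_MARKER : value.toString()).append('$');
        }
        try {
            byte[] digest = MessageDigest.getInstance("SHA-256")
                .digest(buffer.toString().getBytes(StandardCharsets.UTF_8));
            return Base64.getUrlEncoder().withoutPadding().encodeToString(digest);
        } catch (NoSuchAlgorithmException e) {
            throw new IllegalStateException(e);
        }
    }

    public static void main(String[] args) {
        String id1 = idFor(Map.of("reviewer", "user_7", "category", "books"));
        String id2 = idFor(Map.of("category", "books", "reviewer", "user_7"));
        System.out.println(id1.equals(id2)); // true: insertion order does not matter
    }
}
```

Because the id is deterministic, DataFrameIndexer can set it on the IndexRequest and let ordinary indexing overwrite the previous version of the same bucket on a re-run.
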
+ */ + +package org.elasticsearch.xpack.dataframe.transforms; + +import org.elasticsearch.test.ESTestCase; + +public class IDGeneratorTests extends ESTestCase { + + public void testSupportedTypes() { + IDGenerator idGen = new IDGenerator(); + idGen.add("key1", "value1"); + String id = idGen.getID(); + idGen.add("key2", null); + assertNotEquals(id, idGen.getID()); + id = idGen.getID(); + idGen.add("key3", "value3"); + assertNotEquals(id, idGen.getID()); + id = idGen.getID(); + idGen.add("key4", 12L); + assertNotEquals(id, idGen.getID()); + id = idGen.getID(); + idGen.add("key5", 44.444); + assertNotEquals(id, idGen.getID()); + idGen.add("key6", 13); + assertNotEquals(id, idGen.getID()); + } + + public void testOrderIndependence() { + IDGenerator idGen = new IDGenerator(); + idGen.add("key1", "value1"); + idGen.add("key2", "value2"); + String id1 = idGen.getID(); + + idGen = new IDGenerator(); + idGen.add("key2", "value2"); + idGen.add("key1", "value1"); + String id2 = idGen.getID(); + + assertEquals(id1, id2); + } + + public void testEmptyThrows() { + IDGenerator idGen = new IDGenerator(); + + RuntimeException e = expectThrows(RuntimeException.class, () -> idGen.getID()); + + assertEquals("Add at least 1 object before generating the ID", e.getMessage()); + } + + public void testDuplicatedKeyThrows() { + IDGenerator idGen = new IDGenerator(); + idGen.add("key1", "value1"); + + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> idGen.add("key1", "some_other_value")); + + assertEquals("Keys must be unique", e.getMessage()); + } + +} diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtilsTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtilsTests.java index 287f327d0f664..eedf6264f348b 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtilsTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/transforms/pivot/AggregationResultUtilsTests.java @@ -44,6 +44,7 @@ import org.elasticsearch.search.aggregations.pipeline.ParsedStatsBucket; import org.elasticsearch.search.aggregations.pipeline.StatsBucketPipelineAggregationBuilder; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.core.dataframe.DataFrameField; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.pivot.GroupConfig; @@ -51,8 +52,10 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.stream.Collectors; import static java.util.Arrays.asList; @@ -147,7 +150,7 @@ aggTypedName, asMap( executeTest(groupBy, aggregationBuilders, input, fieldTypeMap, expected, 20); } - public void testExtractCompositeAggregationResultsMultiSources() throws IOException { + public void testExtractCompositeAggregationResultsMultipleGroups() throws IOException { String targetField = randomAlphaOfLengthBetween(5, 10); String targetField2 = randomAlphaOfLengthBetween(5, 10) + "_2"; @@ -406,19 +409,159 @@ aggTypedName2, asMap( executeTest(groupBy, aggregationBuilders, input, fieldTypeMap, expected, 10); } + public void testExtractCompositeAggregationResultsDocIDs() throws IOException { + String targetField = randomAlphaOfLengthBetween(5, 
10); + String targetField2 = randomAlphaOfLengthBetween(5, 10) + "_2"; + + GroupConfig groupBy = parseGroupConfig("{" + + "\"" + targetField + "\" : {" + + " \"terms\" : {" + + " \"field\" : \"doesn't_matter_for_this_test\"" + + " } }," + + "\"" + targetField2 + "\" : {" + + " \"terms\" : {" + + " \"field\" : \"doesn't_matter_for_this_test\"" + + " } }" + + "}"); + + String aggName = randomAlphaOfLengthBetween(5, 10); + String aggTypedName = "avg#" + aggName; + Collection aggregationBuilders = Collections.singletonList(AggregationBuilders.avg(aggName)); + + Map inputFirstRun = asMap( + "buckets", + asList( + asMap( + KEY, asMap( + targetField, "ID1", + targetField2, "ID1_2" + ), + aggTypedName, asMap( + "value", 42.33), + DOC_COUNT, 1), + asMap( + KEY, asMap( + targetField, "ID1", + targetField2, "ID2_2" + ), + aggTypedName, asMap( + "value", 8.4), + DOC_COUNT, 2), + asMap( + KEY, asMap( + targetField, "ID2", + targetField2, "ID1_2" + ), + aggTypedName, asMap( + "value", 28.99), + DOC_COUNT, 3), + asMap( + KEY, asMap( + targetField, "ID3", + targetField2, "ID2_2" + ), + aggTypedName, asMap( + "value", 12.55), + DOC_COUNT, 4) + )); + + Map inputSecondRun = asMap( + "buckets", + asList( + asMap( + KEY, asMap( + targetField, "ID1", + targetField2, "ID1_2" + ), + aggTypedName, asMap( + "value", 433.33), + DOC_COUNT, 12), + asMap( + KEY, asMap( + targetField, "ID1", + targetField2, "ID2_2" + ), + aggTypedName, asMap( + "value", 83.4), + DOC_COUNT, 32), + asMap( + KEY, asMap( + targetField, "ID2", + targetField2, "ID1_2" + ), + aggTypedName, asMap( + "value", 21.99), + DOC_COUNT, 2), + asMap( + KEY, asMap( + targetField, "ID3", + targetField2, "ID2_2" + ), + aggTypedName, asMap( + "value", 122.55), + DOC_COUNT, 44) + )); + DataFrameIndexerTransformStats stats = new DataFrameIndexerTransformStats(); + + Map fieldTypeMap = asStringMap( + aggName, "double", + targetField, "keyword", + targetField2, "keyword" + ); + + List> resultFirstRun = runExtraction(groupBy, aggregationBuilders, inputFirstRun, fieldTypeMap, stats); + List> resultSecondRun = runExtraction(groupBy, aggregationBuilders, inputSecondRun, fieldTypeMap, stats); + + assertNotEquals(resultFirstRun, resultSecondRun); + + Set documentIdsFirstRun = new HashSet<>(); + resultFirstRun.forEach(m -> { + documentIdsFirstRun.add((String) m.get(DataFrameField.DOCUMENT_ID_FIELD)); + }); + + assertEquals(4, documentIdsFirstRun.size()); + + Set documentIdsSecondRun = new HashSet<>(); + resultSecondRun.forEach(m -> { + documentIdsSecondRun.add((String) m.get(DataFrameField.DOCUMENT_ID_FIELD)); + }); + + assertEquals(4, documentIdsSecondRun.size()); + assertEquals(documentIdsFirstRun, documentIdsSecondRun); + } + + + private void executeTest(GroupConfig groups, Collection aggregationBuilders, Map input, Map fieldTypeMap, List> expected, long expectedDocCounts) throws IOException { DataFrameIndexerTransformStats stats = new DataFrameIndexerTransformStats(); XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); builder.map(input); + List> result = runExtraction(groups, aggregationBuilders, input, fieldTypeMap, stats); + + // remove the document ids and test uniqueness + Set documentIds = new HashSet<>(); + result.forEach(m -> { + documentIds.add((String) m.remove(DataFrameField.DOCUMENT_ID_FIELD)); + }); + + assertEquals(result.size(), documentIds.size()); + assertEquals(expected, result); + assertEquals(expectedDocCounts, stats.getNumDocuments()); + + } + + private List> runExtraction(GroupConfig groups, Collection 
aggregationBuilders, + Map input, Map fieldTypeMap, DataFrameIndexerTransformStats stats) throws IOException { + + XContentBuilder builder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + builder.map(input); + try (XContentParser parser = createParser(builder)) { CompositeAggregation agg = ParsedComposite.fromXContent(parser, "my_feature"); - List> result = AggregationResultUtils - .extractCompositeAggregationResults(agg, groups, aggregationBuilders, fieldTypeMap, stats).collect(Collectors.toList()); - - assertEquals(expected, result); - assertEquals(expectedDocCounts, stats.getNumDocuments()); + return AggregationResultUtils.extractCompositeAggregationResults(agg, groups, aggregationBuilders, fieldTypeMap, stats) + .collect(Collectors.toList()); } } diff --git a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/util/BatchedDataIteratorTests.java b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/util/BatchedDataIteratorTests.java index 6a0714076a317..4ca60acac37ed 100644 --- a/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/util/BatchedDataIteratorTests.java +++ b/x-pack/plugin/data-frame/src/test/java/org/elasticsearch/xpack/dataframe/util/BatchedDataIteratorTests.java @@ -146,7 +146,7 @@ public void testQueryReturnsThreeBatches() throws Exception { testIterator.next(future); batch = future.get(); assertEquals(1, batch.size()); - assertTrue(batch.containsAll(Collections.singletonList(createJsonDoc("f")))); + assertTrue(batch.contains(createJsonDoc("f"))); assertFalse(testIterator.hasNext()); assertTrue(wasScrollCleared); @@ -183,7 +183,6 @@ private void assertSearchRequest() { SearchRequest searchRequest = searchRequests.get(0); assertThat(searchRequest.indices(), equalTo(new String[] {INDEX_NAME})); assertThat(searchRequest.scroll().keepAlive(), equalTo(TimeValue.timeValueMinutes(5))); - assertThat(searchRequest.types().length, equalTo(0)); assertThat(searchRequest.source().query(), equalTo(QueryBuilders.matchAllQuery())); assertThat(searchRequest.source().trackTotalHitsUpTo(), is(SearchContext.TRACK_TOTAL_HITS_ACCURATE)); } diff --git a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java index 470260a7efac0..130d6deed567f 100644 --- a/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java +++ b/x-pack/plugin/graph/src/main/java/org/elasticsearch/xpack/graph/rest/action/RestGraphAction.java @@ -41,6 +41,8 @@ public class RestGraphAction extends XPackRestHandler { private static final DeprecationLogger deprecationLogger = new DeprecationLogger(LogManager.getLogger(RestGraphAction.class)); + public static final String TYPES_DEPRECATION_MESSAGE = "[types removal]" + + " Specifying types in graph requests is deprecated."; public static final ParseField TIMEOUT_FIELD = new ParseField("timeout"); public static final ParseField SIGNIFICANCE_FIELD = new ParseField("use_significance"); @@ -111,7 +113,10 @@ public RestChannelConsumer doPrepareRequest(final RestRequest request, final XPa parseHop(parser, currentHop, graphRequest); } - graphRequest.types(Strings.splitStringByCommaToArray(request.param("type"))); + if (request.hasParam("type")) { + deprecationLogger.deprecatedAndMaybeLog("graph_with_types", TYPES_DEPRECATION_MESSAGE); + graphRequest.types(Strings.splitStringByCommaToArray(request.param("type"))); + } return 
channel -> client.es().execute(INSTANCE, graphRequest, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/graph/src/test/java/org/elasticsearch/xpack/graph/rest/action/RestGraphActionTests.java b/x-pack/plugin/graph/src/test/java/org/elasticsearch/xpack/graph/rest/action/RestGraphActionTests.java new file mode 100644 index 0000000000000..486ac4e70e346 --- /dev/null +++ b/x-pack/plugin/graph/src/test/java/org/elasticsearch/xpack/graph/rest/action/RestGraphActionTests.java @@ -0,0 +1,35 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.graph.rest.action; + +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.rest.FakeRestRequest; +import org.elasticsearch.test.rest.RestActionTestCase; +import org.junit.Before; + +public class RestGraphActionTests extends RestActionTestCase { + + @Before + public void setUpAction() { + new RestGraphAction(Settings.EMPTY, controller()); + } + + public void testTypeInPath() { + RestRequest request = new FakeRestRequest.Builder(xContentRegistry()) + .withMethod(RestRequest.Method.GET) + .withPath("/some_index/some_type/_graph/explore") + .withContent(new BytesArray("{}"), XContentType.JSON) + .build(); + + dispatchRequest(request); + assertWarnings(RestGraphAction.TYPES_DEPRECATION_MESSAGE); + } + +} diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleMetadataTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleMetadataTests.java index 790dd5de632e6..2444cbf99fd52 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleMetadataTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/indexlifecycle/IndexLifecycleMetadataTests.java @@ -16,18 +16,18 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.xpack.core.indexlifecycle.FreezeAction; -import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; import org.elasticsearch.test.AbstractDiffableSerializationTestCase; import org.elasticsearch.xpack.core.indexlifecycle.AllocateAction; import org.elasticsearch.xpack.core.indexlifecycle.DeleteAction; import org.elasticsearch.xpack.core.indexlifecycle.ForceMergeAction; +import org.elasticsearch.xpack.core.indexlifecycle.FreezeAction; import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata; import org.elasticsearch.xpack.core.indexlifecycle.IndexLifecycleMetadata.IndexLifecycleMetadataDiff; import org.elasticsearch.xpack.core.indexlifecycle.LifecycleAction; import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicy; import org.elasticsearch.xpack.core.indexlifecycle.LifecyclePolicyMetadata; import org.elasticsearch.xpack.core.indexlifecycle.LifecycleType; +import org.elasticsearch.xpack.core.indexlifecycle.OperationMode; import org.elasticsearch.xpack.core.indexlifecycle.Phase; import org.elasticsearch.xpack.core.indexlifecycle.ReadOnlyAction; import org.elasticsearch.xpack.core.indexlifecycle.RolloverAction; @@ -137,7 
+137,7 @@ protected Reader> diffReader() { } public void testMinimumSupportedVersion() { - assertEquals(Version.V_7_0_0, createTestInstance().getMinimalSupportedVersion()); + assertEquals(Version.V_6_6_0, createTestInstance().getMinimalSupportedVersion()); } public void testcontext() { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index fdfc6c9a59498..c6f269b1edd4d 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -370,7 +370,7 @@ public Collection createComponents(Client client, ClusterService cluster NamedXContentRegistry xContentRegistry, Environment environment, NodeEnvironment nodeEnvironment, NamedWriteableRegistry namedWriteableRegistry) { try { - return createComponents(client, threadPool, clusterService, resourceWatcherService); + return createComponents(client, threadPool, clusterService, resourceWatcherService, scriptService); } catch (final Exception e) { throw new IllegalStateException("security initialization failed", e); } @@ -378,7 +378,7 @@ public Collection createComponents(Client client, ClusterService cluster // pkg private for testing - tests want to pass in their set of extensions hence we are not using the extension service directly Collection createComponents(Client client, ThreadPool threadPool, ClusterService clusterService, - ResourceWatcherService resourceWatcherService) throws Exception { + ResourceWatcherService resourceWatcherService, ScriptService scriptService) throws Exception { if (enabled == false) { return Collections.emptyList(); } @@ -404,7 +404,8 @@ Collection createComponents(Client client, ThreadPool threadPool, Cluste // realms construction final NativeUsersStore nativeUsersStore = new NativeUsersStore(settings, client, securityIndex.get()); - final NativeRoleMappingStore nativeRoleMappingStore = new NativeRoleMappingStore(settings, client, securityIndex.get()); + final NativeRoleMappingStore nativeRoleMappingStore = new NativeRoleMappingStore(settings, client, securityIndex.get(), + scriptService); final AnonymousUser anonymousUser = new AnonymousUser(settings); final ReservedRealm reservedRealm = new ReservedRealm(env, settings, nativeUsersStore, anonymousUser, securityIndex.get(), threadPool); @@ -617,7 +618,6 @@ public static List> getSettings(boolean transportClientMode, List ACCEPT_DEFAULT_PASSWORD_SETTING = Setting.boolSetting( - SecurityField.setting("authc.accept_default_password"), true, Setting.Property.NodeScope, Setting.Property.Filtered, - Setting.Property.Deprecated); public static final Setting BOOTSTRAP_ELASTIC_PASSWORD = SecureSetting.secureString("bootstrap.password", KeyStoreWrapper.SEED_SETTING); @@ -250,7 +246,6 @@ private Version getDefinedVersion(String username) { } public static void addSettings(List> settingsList) { - settingsList.add(ACCEPT_DEFAULT_PASSWORD_SETTING); settingsList.add(BOOTSTRAP_ELASTIC_PASSWORD); } } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java index cbb352e67ab39..e8d874bc9d481 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java +++ 
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.xpack.core.security.ScrollHelper; import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheAction; import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheResponse; @@ -51,7 +52,6 @@ import java.util.concurrent.CopyOnWriteArrayList; import java.util.function.Supplier; import java.util.stream.Collectors; -import java.util.stream.Stream; import static org.elasticsearch.action.DocWriteResponse.Result.CREATED; import static org.elasticsearch.action.DocWriteResponse.Result.DELETED; @@ -99,12 +99,14 @@ public void onFailure(Exception e) { private final Settings settings; private final Client client; private final SecurityIndexManager securityIndex; + private final ScriptService scriptService; private final List realmsToRefresh = new CopyOnWriteArrayList<>(); - public NativeRoleMappingStore(Settings settings, Client client, SecurityIndexManager securityIndex) { + public NativeRoleMappingStore(Settings settings, Client client, SecurityIndexManager securityIndex, ScriptService scriptService) { this.settings = settings; this.client = client; this.securityIndex = securityIndex; + this.scriptService = scriptService; } private String getNameFromId(String id) { @@ -120,7 +122,7 @@ private String getIdForName(String name) { * Loads all mappings from the index. * package private for unit testing */ - void loadMappings(ActionListener> listener) { + protected void loadMappings(ActionListener> listener) { if (securityIndex.isIndexUpToDate() == false) { listener.onFailure(new IllegalStateException( "Security index is not on the current version - the native realm will not be operational until " + @@ -149,7 +151,7 @@ void loadMappings(ActionListener> listener) { } } - private ExpressionRoleMapping buildMapping(String id, BytesReference source) { + protected ExpressionRoleMapping buildMapping(String id, BytesReference source) { try (InputStream stream = source.streamInput(); XContentParser parser = XContentType.JSON.xContent() .createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, stream)) { @@ -349,17 +351,16 @@ public void resolveRoles(UserData user, ActionListener> listener) { getRoleMappings(null, ActionListener.wrap( mappings -> { final ExpressionModel model = user.asModel(); - Stream stream = mappings.stream() - .filter(ExpressionRoleMapping::isEnabled) - .filter(m -> m.getExpression().match(model)); - if (logger.isTraceEnabled()) { - stream = stream.map(m -> { - logger.trace("User [{}] matches role-mapping [{}] with roles [{}]", user.getUsername(), m.getName(), - m.getRoles()); - return m; - }); - } - final Set roles = stream.flatMap(m -> m.getRoles().stream()).collect(Collectors.toSet()); + final Set roles = mappings.stream() + .filter(ExpressionRoleMapping::isEnabled) + .filter(m -> m.getExpression().match(model)) + .flatMap(m -> { + final Set roleNames = m.getRoleNames(scriptService, model); + logger.trace("Applying role-mapping [{}] to user-model [{}] produced role-names [{}]", + m.getName(), model, roleNames); + return roleNames.stream(); + }) + .collect(Collectors.toSet()); logger.debug("Mapping user [{}] to roles [{}]", user, roles); listener.onResponse(roles); }, 
listener::onFailure diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java index 2d1d4a98b4ba6..48659b8968661 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java @@ -402,8 +402,8 @@ public static void buildRoleFromDescriptors(Collection roleDescr .flatMap(Collection::stream) .collect(Collectors.toSet()); privilegeStore.getPrivileges(applicationNames, applicationPrivilegeNames, ActionListener.wrap(appPrivileges -> { - applicationPrivilegesMap.forEach((key, names) -> - builder.addApplicationPrivilege(ApplicationPrivilege.get(key.v1(), names, appPrivileges), key.v2())); + applicationPrivilegesMap.forEach((key, names) -> ApplicationPrivilege.get(key.v1(), names, appPrivileges) + .forEach(priv -> builder.addApplicationPrivilege(priv, key.v2()))); listener.onResponse(builder.build()); }, listener::onFailure)); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java index 09c89752f8314..19694bb003314 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStore.java @@ -34,9 +34,11 @@ import org.elasticsearch.common.xcontent.XContentParseException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.TermQueryBuilder; +import org.elasticsearch.index.query.TermsQueryBuilder; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.security.ScrollHelper; import org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheRequest; @@ -46,6 +48,7 @@ import org.elasticsearch.xpack.security.support.SecurityIndexManager; import java.io.IOException; +import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.List; @@ -62,6 +65,7 @@ import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; import static org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilegeDescriptor.DOC_TYPE_VALUE; +import static org.elasticsearch.xpack.core.security.authz.privilege.ApplicationPrivilegeDescriptor.Fields.APPLICATION; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_INDEX_NAME; /** @@ -97,7 +101,7 @@ public void getPrivileges(Collection applications, Collection na listener.onResponse(Collections.emptyList()); } else if (frozenSecurityIndex.isAvailable() == false) { listener.onFailure(frozenSecurityIndex.getUnavailableReason()); - } else if (applications != null && applications.size() == 1 && names != null && names.size() == 1) { + } else if (isSinglePrivilegeMatch(applications, names)) { getPrivilege(Objects.requireNonNull(Iterables.get(applications, 0)), 
Objects.requireNonNull(Iterables.get(names, 0)), ActionListener.wrap(privilege -> listener.onResponse(privilege == null ? Collections.emptyList() : Collections.singletonList(privilege)), @@ -110,11 +114,14 @@ public void getPrivileges(Collection applications, Collection na if (isEmpty(applications) && isEmpty(names)) { query = typeQuery; } else if (isEmpty(names)) { - query = QueryBuilders.boolQuery().filter(typeQuery).filter( - QueryBuilders.termsQuery(ApplicationPrivilegeDescriptor.Fields.APPLICATION.getPreferredName(), applications)); + query = QueryBuilders.boolQuery().filter(typeQuery).filter(getApplicationNameQuery(applications)); } else if (isEmpty(applications)) { query = QueryBuilders.boolQuery().filter(typeQuery) - .filter(QueryBuilders.termsQuery(ApplicationPrivilegeDescriptor.Fields.NAME.getPreferredName(), names)); + .filter(getPrivilegeNameQuery(names)); + } else if (hasWildcard(applications)) { + query = QueryBuilders.boolQuery().filter(typeQuery) + .filter(getApplicationNameQuery(applications)) + .filter(getPrivilegeNameQuery(names)); } else { final String[] docIds = applications.stream() .flatMap(a -> names.stream().map(n -> toDocId(a, n))) @@ -139,6 +146,49 @@ public void getPrivileges(Collection applications, Collection na } } + private boolean isSinglePrivilegeMatch(Collection applications, Collection names) { + return applications != null && applications.size() == 1 && hasWildcard(applications) == false && names != null && names.size() == 1; + } + + private boolean hasWildcard(Collection applications) { + return applications.stream().anyMatch(n -> n.endsWith("*")); + } + + private QueryBuilder getPrivilegeNameQuery(Collection names) { + return QueryBuilders.termsQuery(ApplicationPrivilegeDescriptor.Fields.NAME.getPreferredName(), names); + } + + private QueryBuilder getApplicationNameQuery(Collection applications) { + if (applications.contains("*")) { + return QueryBuilders.existsQuery(APPLICATION.getPreferredName()); + } + final List rawNames = new ArrayList<>(applications.size()); + final List wildcardNames = new ArrayList<>(applications.size()); + for (String name : applications) { + if (name.endsWith("*")) { + wildcardNames.add(name); + } else { + rawNames.add(name); + } + } + + assert rawNames.isEmpty() == false || wildcardNames.isEmpty() == false; + + TermsQueryBuilder termsQuery = rawNames.isEmpty() ? 
null : QueryBuilders.termsQuery(APPLICATION.getPreferredName(), rawNames); + if (wildcardNames.isEmpty()) { + return termsQuery; + } + final BoolQueryBuilder boolQuery = QueryBuilders.boolQuery(); + if (termsQuery != null) { + boolQuery.filter(termsQuery); + } + for (String wildcard : wildcardNames) { + final String prefix = wildcard.substring(0, wildcard.length() - 1); + boolQuery.filter(QueryBuilders.prefixQuery(APPLICATION.getPreferredName(), prefix)); + } + return boolQuery; + } + private static boolean isEmpty(Collection collection) { return collection == null || collection.isEmpty(); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java index b82bf7f3c7fc2..c0ec72277d870 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java @@ -26,10 +26,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -69,7 +66,6 @@ import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; -import static org.elasticsearch.xpack.core.security.SecurityField.setting; import static org.elasticsearch.xpack.core.security.authz.RoleDescriptor.ROLE_TYPE; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_INDEX_NAME; @@ -83,11 +79,6 @@ */ public class NativeRolesStore implements BiConsumer, ActionListener> { - // these are no longer used, but leave them around for users upgrading - private static final Setting CACHE_SIZE_SETTING = - Setting.intSetting(setting("authz.store.roles.index.cache.max_size"), 10000, Property.NodeScope, Property.Deprecated); - private static final Setting CACHE_TTL_SETTING = Setting.timeSetting(setting("authz.store.roles.index.cache.ttl"), - TimeValue.timeValueMinutes(20), Property.NodeScope, Property.Deprecated); private static final Logger logger = LogManager.getLogger(NativeRolesStore.class); private final Settings settings; @@ -413,11 +404,6 @@ static RoleDescriptor transformRole(String id, BytesReference sourceBytes, Logge } } - public static void addSettings(List> settings) { - settings.add(CACHE_SIZE_SETTING); - settings.add(CACHE_TTL_SETTING); - } - /** * Gets the document's id field for the given role name. 
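
The NativePrivilegeStore changes above widen privilege lookups to wildcard application names. Below is a hedged sketch of the query-building strategy, mirroring the clause structure shown in the diff (exact names grouped into one terms query, a trailing "*" turned into a prefix filter, a bare "*" matched with an exists query); the standalone class shape and parameter names are illustrative only:

```java
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

final class ApplicationNameQuerySketch {

    static QueryBuilder applicationNameQuery(Collection<String> applications, String field) {
        if (applications.contains("*")) {
            return QueryBuilders.existsQuery(field); // "*" matches any application
        }
        List<String> exact = new ArrayList<>();
        List<String> wildcards = new ArrayList<>();
        for (String name : applications) {
            if (name.endsWith("*")) {
                wildcards.add(name);
            } else {
                exact.add(name);
            }
        }
        if (wildcards.isEmpty()) {
            return QueryBuilders.termsQuery(field, exact);
        }
        BoolQueryBuilder bool = QueryBuilders.boolQuery();
        if (exact.isEmpty() == false) {
            bool.filter(QueryBuilders.termsQuery(field, exact));
        }
        for (String wildcard : wildcards) {
            // keep everything before the trailing "*" and filter on that prefix
            bool.filter(QueryBuilders.prefixQuery(field, wildcard.substring(0, wildcard.length() - 1)));
        }
        return bool;
    }
}
```
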
*/ diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index 35180ab8f31d0..cc573fd9247f9 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.license.TestUtils; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.plugins.MapperPlugin; +import org.elasticsearch.script.ScriptService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; import org.elasticsearch.threadpool.ThreadPool; @@ -129,7 +130,7 @@ protected SSLService getSslService() { Client client = mock(Client.class); when(client.threadPool()).thenReturn(threadPool); when(client.settings()).thenReturn(settings); - return security.createComponents(client, threadPool, clusterService, mock(ResourceWatcherService.class)); + return security.createComponents(client, threadPool, clusterService, mock(ResourceWatcherService.class), mock(ScriptService.class)); } private static T findComponent(Class type, Collection components) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java index 91222a5af5845..ee5f935fcc56d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/rolemapping/TransportPutRoleMappingActionTests.java @@ -25,9 +25,10 @@ import java.util.Map; import java.util.concurrent.atomic.AtomicReference; -import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.iterableWithSize; import static org.mockito.Matchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; @@ -72,7 +73,8 @@ public void testPutValidMapping() throws Exception { assertThat(mapping.getExpression(), is(expression)); assertThat(mapping.isEnabled(), equalTo(true)); assertThat(mapping.getName(), equalTo("anarchy")); - assertThat(mapping.getRoles(), containsInAnyOrder("superuser")); + assertThat(mapping.getRoles(), iterableWithSize(1)); + assertThat(mapping.getRoles(), contains("superuser")); assertThat(mapping.getMetadata().size(), equalTo(1)); assertThat(mapping.getMetadata().get("dumb"), equalTo(true)); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTestCase.java index 43e5fb216399d..fe8220dad4e6e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/kerberos/KerberosRealmTestCase.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.license.XPackLicenseState; +import 
org.elasticsearch.script.ScriptService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -146,7 +147,8 @@ protected NativeRoleMappingStore roleMappingStore(final List userNames) when(mockClient.threadPool()).thenReturn(threadPool); when(mockClient.settings()).thenReturn(settings); - final NativeRoleMappingStore store = new NativeRoleMappingStore(Settings.EMPTY, mockClient, mock(SecurityIndexManager.class)); + final NativeRoleMappingStore store = new NativeRoleMappingStore(Settings.EMPTY, mockClient, mock(SecurityIndexManager.class), + mock(ScriptService.class)); final NativeRoleMappingStore roleMapper = spy(store); doAnswer(invocation -> { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java index c0a93d36ab89d..70e8719c0f797 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealmTests.java @@ -8,6 +8,8 @@ import com.unboundid.ldap.sdk.LDAPURL; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.SecureSettings; import org.elasticsearch.common.settings.SecureString; @@ -17,6 +19,9 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.script.ScriptModule; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.script.mustache.MustacheScriptEngine; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.watcher.ResourceWatcherService; @@ -29,11 +34,14 @@ import org.elasticsearch.xpack.core.security.authc.ldap.LdapUserSearchSessionFactorySettings; import org.elasticsearch.xpack.core.security.authc.ldap.PoolingSessionFactorySettings; import org.elasticsearch.xpack.core.security.authc.ldap.SearchGroupsResolverSettings; +import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapMetaDataResolverSettings; import org.elasticsearch.xpack.core.security.authc.ldap.support.LdapSearchScope; import org.elasticsearch.xpack.core.security.authc.support.CachingUsernamePasswordRealmSettings; import org.elasticsearch.xpack.core.security.authc.support.DelegatedAuthorizationSettings; import org.elasticsearch.xpack.core.security.authc.support.DnRoleMapperSettings; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; +import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.core.security.authc.support.mapper.TemplateRoleName; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.ssl.SSLConfigurationSettings; import org.elasticsearch.xpack.core.ssl.SSLService; @@ -42,6 +50,8 @@ import org.elasticsearch.xpack.security.authc.ldap.support.SessionFactory; import org.elasticsearch.xpack.security.authc.support.DnRoleMapper; import org.elasticsearch.xpack.security.authc.support.MockLookupRealm; +import 
org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; +import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.junit.After; import org.junit.Before; @@ -54,6 +64,7 @@ import static org.elasticsearch.xpack.core.security.authc.RealmSettings.getFullSettingKey; import static org.elasticsearch.xpack.core.security.authc.ldap.support.SessionFactorySettings.URLS_SETTING; import static org.hamcrest.Matchers.arrayContaining; +import static org.hamcrest.Matchers.arrayContainingInAnyOrder; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -394,6 +405,75 @@ public void testLdapRealmMapsUserDNToRole() throws Exception { assertThat(user.roles(), arrayContaining("avenger")); } + /** + * This tests template role mappings (see + * {@link TemplateRoleName}) with an LDAP realm, using a additional + * metadata field (see {@link LdapMetaDataResolverSettings#ADDITIONAL_META_DATA_SETTING}). + */ + public void testLdapRealmWithTemplatedRoleMapping() throws Exception { + String groupSearchBase = "o=sevenSeas"; + String userTemplate = VALID_USER_TEMPLATE; + Settings settings = Settings.builder() + .put(defaultGlobalSettings) + .put(buildLdapSettings(ldapUrls(), userTemplate, groupSearchBase, LdapSearchScope.SUB_TREE)) + .put(getFullSettingKey(REALM_IDENTIFIER.getName(), LdapMetaDataResolverSettings.ADDITIONAL_META_DATA_SETTING), "uid") + .build(); + RealmConfig config = getRealmConfig(REALM_IDENTIFIER, settings); + + SecurityIndexManager mockSecurityIndex = mock(SecurityIndexManager.class); + when(mockSecurityIndex.isAvailable()).thenReturn(true); + when(mockSecurityIndex.isIndexUpToDate()).thenReturn(true); + when(mockSecurityIndex.isMappingUpToDate()).thenReturn(true); + + Client mockClient = mock(Client.class); + when(mockClient.threadPool()).thenReturn(threadPool); + + final ScriptService scriptService = new ScriptService(defaultGlobalSettings, + Collections.singletonMap(MustacheScriptEngine.NAME, new MustacheScriptEngine()), ScriptModule.CORE_CONTEXTS); + NativeRoleMappingStore roleMapper = new NativeRoleMappingStore(defaultGlobalSettings, mockClient, mockSecurityIndex, + scriptService) { + @Override + protected void loadMappings(ActionListener> listener) { + listener.onResponse( + Arrays.asList( + this.buildMapping("m1", new BytesArray("{" + + "\"role_templates\":[{\"template\":{\"source\":\"_user_{{metadata.uid}}\"}}]," + + "\"enabled\":true," + + "\"rules\":{ \"any\":[" + + " { \"field\":{\"realm.name\":\"ldap1\"}}," + + " { \"field\":{\"realm.name\":\"ldap2\"}}" + + "]}}")), + this.buildMapping("m2", new BytesArray("{" + + "\"roles\":[\"should_not_happen\"]," + + "\"enabled\":true," + + "\"rules\":{ \"all\":[" + + " { \"field\":{\"realm.name\":\"ldap1\"}}," + + " { \"field\":{\"realm.name\":\"ldap2\"}}" + + "]}}")), + this.buildMapping("m3", new BytesArray("{" + + "\"roles\":[\"sales_admin\"]," + + "\"enabled\":true," + + "\"rules\":" + + " { \"field\":{\"dn\":\"*,ou=people,o=sevenSeas\"}}" + + "}")) + ) + ); + } + }; + LdapSessionFactory ldapFactory = new LdapSessionFactory(config, sslService, threadPool); + LdapRealm ldap = new LdapRealm(config, ldapFactory, + roleMapper, threadPool); + ldap.initialize(Collections.singleton(ldap), licenseState); + + PlainActionFuture future = new PlainActionFuture<>(); + ldap.authenticate(new UsernamePasswordToken("Horatio Hornblower", new SecureString(PASSWORD)), future); + final AuthenticationResult result = future.actionGet(); 
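
The LdapRealmTests addition above exercises the new templated role mappings: a mapping may carry role_templates such as "_user_{{metadata.uid}}", which are rendered against the authenticated user's data when roles are resolved (in the patch this goes through the Mustache script engine and the ScriptService now injected into NativeRoleMappingStore). A toy, self-contained illustration of the substitution only; the class, method, and the flat "metadata.uid" lookup key are assumptions for the example, not the production API:

```java
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

final class RoleTemplateSketch {

    private static final Pattern PLACEHOLDER = Pattern.compile("\\{\\{([^}]+)}}");

    // Replace each {{...}} placeholder with the matching value from the user model.
    static String render(String template, Map<String, Object> userModel) {
        Matcher matcher = PLACEHOLDER.matcher(template);
        StringBuilder out = new StringBuilder();
        int last = 0;
        while (matcher.find()) {
            out.append(template, last, matcher.start());
            out.append(userModel.getOrDefault(matcher.group(1), ""));
            last = matcher.end();
        }
        out.append(template.substring(last));
        return out.toString();
    }

    public static void main(String[] args) {
        System.out.println(render("_user_{{metadata.uid}}", Map.of("metadata.uid", "hhornblo")));
        // prints: _user_hhornblo, matching the role the test expects for that user
    }
}
```
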
+ assertThat(result.getStatus(), is(AuthenticationResult.Status.SUCCESS)); + User user = result.getUser(); + assertThat(user, notNullValue()); + assertThat(user.roles(), arrayContainingInAnyOrder("_user_hhornblo", "sales_admin")); + } + /** * The contract for {@link Realm} implementations is that they should log-and-return-null (and * not call {@link ActionListener#onFailure(Exception)}) if there is an internal exception that prevented them from performing an diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java index 729bd08d7faf3..276d8a333f796 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java @@ -5,31 +5,48 @@ */ package org.elasticsearch.xpack.security.authc.support.mapper; +import org.elasticsearch.Version; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.ByteBufferStreamInput; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.env.Environment; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.xpack.core.XPackClientPlugin; import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.core.security.authc.support.mapper.TemplateRoleName; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.AllExpression; +import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; import org.hamcrest.Matchers; import org.junit.Before; import org.mockito.Mockito; import java.io.IOException; +import java.util.Arrays; import java.util.Collections; +import java.util.List; import java.util.Locale; -import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.iterableWithSize; import static org.hamcrest.Matchers.notNullValue; public class ExpressionRoleMappingTests extends ESTestCase { @@ -39,44 +56,44 @@ public class ExpressionRoleMappingTests extends ESTestCase { @Before public void setupMapping() throws Exception { realm = new 
RealmConfig(new RealmConfig.RealmIdentifier("ldap", "ldap1"), - Settings.EMPTY, Mockito.mock(Environment.class), new ThreadContext(Settings.EMPTY)); + Settings.EMPTY, Mockito.mock(Environment.class), new ThreadContext(Settings.EMPTY)); } - public void testParseValidJson() throws Exception { + public void testParseValidJsonWithFixedRoleNames() throws Exception { String json = "{" - + "\"roles\": [ \"kibana_user\", \"sales\" ], " - + "\"enabled\": true, " - + "\"rules\": { " - + " \"all\": [ " - + " { \"field\": { \"dn\" : \"*,ou=sales,dc=example,dc=com\" } }, " - + " { \"except\": { \"field\": { \"metadata.active\" : false } } }" - + " ]}" - + "}"; + + "\"roles\": [ \"kibana_user\", \"sales\" ], " + + "\"enabled\": true, " + + "\"rules\": { " + + " \"all\": [ " + + " { \"field\": { \"dn\" : \"*,ou=sales,dc=example,dc=com\" } }, " + + " { \"except\": { \"field\": { \"metadata.active\" : false } } }" + + " ]}" + + "}"; final ExpressionRoleMapping mapping = parse(json, "ldap_sales"); assertThat(mapping.getRoles(), Matchers.containsInAnyOrder("kibana_user", "sales")); assertThat(mapping.getExpression(), instanceOf(AllExpression.class)); final UserRoleMapper.UserData user1a = new UserRoleMapper.UserData( - "john.smith", "cn=john.smith,ou=sales,dc=example,dc=com", - Collections.emptyList(), Collections.singletonMap("active", true), realm + "john.smith", "cn=john.smith,ou=sales,dc=example,dc=com", + Collections.emptyList(), Collections.singletonMap("active", true), realm ); final UserRoleMapper.UserData user1b = new UserRoleMapper.UserData( - user1a.getUsername(), user1a.getDn().toUpperCase(Locale.US), user1a.getGroups(), user1a.getMetadata(), user1a.getRealm() + user1a.getUsername(), user1a.getDn().toUpperCase(Locale.US), user1a.getGroups(), user1a.getMetadata(), user1a.getRealm() ); final UserRoleMapper.UserData user1c = new UserRoleMapper.UserData( - user1a.getUsername(), user1a.getDn().replaceAll(",", ", "), user1a.getGroups(), user1a.getMetadata(), user1a.getRealm() + user1a.getUsername(), user1a.getDn().replaceAll(",", ", "), user1a.getGroups(), user1a.getMetadata(), user1a.getRealm() ); final UserRoleMapper.UserData user1d = new UserRoleMapper.UserData( - user1a.getUsername(), user1a.getDn().replaceAll("dc=", "DC="), user1a.getGroups(), user1a.getMetadata(), user1a.getRealm() + user1a.getUsername(), user1a.getDn().replaceAll("dc=", "DC="), user1a.getGroups(), user1a.getMetadata(), user1a.getRealm() ); final UserRoleMapper.UserData user2 = new UserRoleMapper.UserData( - "jamie.perez", "cn=jamie.perez,ou=sales,dc=example,dc=com", - Collections.emptyList(), Collections.singletonMap("active", false), realm + "jamie.perez", "cn=jamie.perez,ou=sales,dc=example,dc=com", + Collections.emptyList(), Collections.singletonMap("active", false), realm ); final UserRoleMapper.UserData user3 = new UserRoleMapper.UserData( - "simone.ng", "cn=simone.ng,ou=finance,dc=example,dc=com", - Collections.emptyList(), Collections.singletonMap("active", true), realm + "simone.ng", "cn=simone.ng,ou=finance,dc=example,dc=com", + Collections.emptyList(), Collections.singletonMap("active", true), realm ); assertThat(mapping.getExpression().match(user1a.asModel()), equalTo(true)); @@ -87,58 +104,218 @@ public void testParseValidJson() throws Exception { assertThat(mapping.getExpression().match(user3.asModel()), equalTo(false)); } + public void testParseValidJsonWithTemplatedRoleNames() throws Exception { + String json = "{" + + "\"role_templates\": [ " + + " { \"template\" : { \"source\":\"kibana_user\"} }," + + " { 
\"template\" : { \"source\":\"sales\"} }," + + " { \"template\" : { \"source\":\"_user_{{username}}\" }, \"format\":\"string\" }" + + " ], " + + "\"enabled\": true, " + + "\"rules\": { " + + " \"all\": [ " + + " { \"field\": { \"dn\" : \"*,ou=sales,dc=example,dc=com\" } }, " + + " { \"except\": { \"field\": { \"metadata.active\" : false } } }" + + " ]}" + + "}"; + final ExpressionRoleMapping mapping = parse(json, "ldap_sales"); + assertThat(mapping.getRoleTemplates(), iterableWithSize(3)); + assertThat(mapping.getRoleTemplates().get(0).getTemplate().utf8ToString(), equalTo("{\"source\":\"kibana_user\"}")); + assertThat(mapping.getRoleTemplates().get(0).getFormat(), equalTo(TemplateRoleName.Format.STRING)); + assertThat(mapping.getRoleTemplates().get(1).getTemplate().utf8ToString(), equalTo("{\"source\":\"sales\"}")); + assertThat(mapping.getRoleTemplates().get(1).getFormat(), equalTo(TemplateRoleName.Format.STRING)); + assertThat(mapping.getRoleTemplates().get(2).getTemplate().utf8ToString(), equalTo("{\"source\":\"_user_{{username}}\"}")); + assertThat(mapping.getRoleTemplates().get(2).getFormat(), equalTo(TemplateRoleName.Format.STRING)); + } + public void testParsingFailsIfRulesAreMissing() throws Exception { String json = "{" - + "\"roles\": [ \"kibana_user\", \"sales\" ], " - + "\"enabled\": true " - + "}"; + + "\"roles\": [ \"kibana_user\", \"sales\" ], " + + "\"enabled\": true " + + "}"; ParsingException ex = expectThrows(ParsingException.class, () -> parse(json, "bad_json")); assertThat(ex.getMessage(), containsString("rules")); } public void testParsingFailsIfRolesMissing() throws Exception { String json = "{" - + "\"enabled\": true, " - + "\"rules\": " - + " { \"field\": { \"dn\" : \"*,ou=sales,dc=example,dc=com\" } } " - + "}"; + + "\"enabled\": true, " + + "\"rules\": " + + " { \"field\": { \"dn\" : \"*,ou=sales,dc=example,dc=com\" } } " + + "}"; ParsingException ex = expectThrows(ParsingException.class, () -> parse(json, "bad_json")); assertThat(ex.getMessage(), containsString("role")); } public void testParsingFailsIfThereAreUnrecognisedFields() throws Exception { String json = "{" - + "\"disabled\": false, " - + "\"roles\": [ \"kibana_user\", \"sales\" ], " - + "\"rules\": " - + " { \"field\": { \"dn\" : \"*,ou=sales,dc=example,dc=com\" } } " - + "}"; + + "\"disabled\": false, " + + "\"roles\": [ \"kibana_user\", \"sales\" ], " + + "\"rules\": " + + " { \"field\": { \"dn\" : \"*,ou=sales,dc=example,dc=com\" } } " + + "}"; ParsingException ex = expectThrows(ParsingException.class, () -> parse(json, "bad_json")); assertThat(ex.getMessage(), containsString("disabled")); } public void testParsingIgnoresTypeFields() throws Exception { String json = "{" - + "\"enabled\": true, " - + "\"roles\": [ \"kibana_user\", \"sales\" ], " - + "\"rules\": " - + " { \"field\": { \"dn\" : \"*,ou=sales,dc=example,dc=com\" } }, " - + "\"doc_type\": \"role-mapping\", " - + "\"type\": \"doc\"" - + "}"; - final ExpressionRoleMapping mapping = parse(json, "from_index"); + + "\"enabled\": true, " + + "\"roles\": [ \"kibana_user\", \"sales\" ], " + + "\"rules\": " + + " { \"field\": { \"dn\" : \"*,ou=sales,dc=example,dc=com\" } }, " + + "\"doc_type\": \"role-mapping\", " + + "\"type\": \"doc\"" + + "}"; + final ExpressionRoleMapping mapping = parse(json, "from_index", true); assertThat(mapping.isEnabled(), equalTo(true)); - assertThat(mapping.getRoles(), containsInAnyOrder("kibana_user", "sales")); + assertThat(mapping.getRoles(), Matchers.containsInAnyOrder("kibana_user", "sales")); + } + + public 
void testParsingOfBothRoleNamesAndTemplates() throws Exception { + String json = "{" + + "\"enabled\": true, " + + "\"roles\": [ \"kibana_user\", \"sales\" ], " + + "\"role_templates\": [" + + " { \"template\" : \"{ \\\"source\\\":\\\"_user_{{username}}\\\" }\", \"format\":\"string\" }" + + "]," + + "\"rules\": " + + " { \"field\": { \"dn\" : \"*,ou=sales,dc=example,dc=com\" } }" + + "}"; + + // This is rejected when validating a request, but is valid when parsing the mapping + final ExpressionRoleMapping mapping = parse(json, "from_api", false); + assertThat(mapping.getRoles(), iterableWithSize(2)); + assertThat(mapping.getRoleTemplates(), iterableWithSize(1)); + } + + public void testToXContentWithRoleNames() throws Exception { + String source = "{" + + "\"roles\": [ " + + " \"kibana_user\"," + + " \"sales\"" + + " ], " + + "\"enabled\": true, " + + "\"rules\": { \"field\": { \"realm.name\" : \"saml1\" } }" + + "}"; + final ExpressionRoleMapping mapping = parse(source, getTestName()); + assertThat(mapping.getRoles(), iterableWithSize(2)); + + final String xcontent = Strings.toString(mapping); + assertThat(xcontent, equalTo( + "{" + + "\"enabled\":true," + + "\"roles\":[" + + "\"kibana_user\"," + + "\"sales\"" + + "]," + + "\"rules\":{\"field\":{\"realm.name\":\"saml1\"}}," + + "\"metadata\":{}" + + "}" + )); + } + + public void testToXContentWithTemplates() throws Exception { + String source = "{" + + "\"metadata\" : { \"answer\":42 }," + + "\"role_templates\": [ " + + " { \"template\" : { \"source\":\"_user_{{username}}\" }, \"format\":\"string\" }," + + " { \"template\" : { \"source\":\"{{#tojson}}groups{{/tojson}}\" }, \"format\":\"json\" }" + + " ], " + + "\"enabled\": false, " + + "\"rules\": { \"field\": { \"realm.name\" : \"saml1\" } }" + + "}"; + final ExpressionRoleMapping mapping = parse(source, getTestName()); + assertThat(mapping.getRoleTemplates(), iterableWithSize(2)); + + final String xcontent = Strings.toString(mapping.toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS, true)); + assertThat(xcontent, equalTo( + "{" + + "\"enabled\":false," + + "\"role_templates\":[" + + "{\"template\":\"{\\\"source\\\":\\\"_user_{{username}}\\\"}\",\"format\":\"string\"}," + + "{\"template\":\"{\\\"source\\\":\\\"{{#tojson}}groups{{/tojson}}\\\"}\",\"format\":\"json\"}" + + "]," + + "\"rules\":{\"field\":{\"realm.name\":\"saml1\"}}," + + "\"metadata\":{\"answer\":42}," + + "\"doc_type\":\"role-mapping\"" + + "}" + )); + + final ExpressionRoleMapping parsed = parse(xcontent, getTestName(), true); + assertThat(parsed.getRoles(), iterableWithSize(0)); + assertThat(parsed.getRoleTemplates(), iterableWithSize(2)); + assertThat(parsed.getMetadata(), Matchers.hasKey("answer")); + } + + public void testSerialization() throws Exception { + final ExpressionRoleMapping original = randomRoleMapping(true); + + final Version version = VersionUtils.randomVersionBetween(random(), Version.V_8_0_0, null); + BytesStreamOutput output = new BytesStreamOutput(); + output.setVersion(version); + original.writeTo(output); + + final NamedWriteableRegistry registry = new NamedWriteableRegistry(new XPackClientPlugin(Settings.EMPTY).getNamedWriteables()); + StreamInput streamInput = new NamedWriteableAwareStreamInput(ByteBufferStreamInput.wrap(BytesReference.toBytes(output.bytes())), + registry); + streamInput.setVersion(version); + final ExpressionRoleMapping serialized = new ExpressionRoleMapping(streamInput); + assertEquals(original, serialized); + } + + public void testSerializationPreV71() throws 
Exception {
+        final ExpressionRoleMapping original = randomRoleMapping(false);
+
+        final Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_7_0_0);
+        BytesStreamOutput output = new BytesStreamOutput();
+        output.setVersion(version);
+        original.writeTo(output);
+
+        final NamedWriteableRegistry registry = new NamedWriteableRegistry(new XPackClientPlugin(Settings.EMPTY).getNamedWriteables());
+        StreamInput streamInput = new NamedWriteableAwareStreamInput(ByteBufferStreamInput.wrap(BytesReference.toBytes(output.bytes())),
+            registry);
+        streamInput.setVersion(version);
+        final ExpressionRoleMapping serialized = new ExpressionRoleMapping(streamInput);
+        assertEquals(original, serialized);
     }
     private ExpressionRoleMapping parse(String json, String name) throws IOException {
+        return parse(json, name, false);
+    }
+
+    private ExpressionRoleMapping parse(String json, String name, boolean fromIndex) throws IOException {
         final NamedXContentRegistry registry = NamedXContentRegistry.EMPTY;
         final XContentParser parser = XContentType.JSON.xContent()
-            .createParser(registry, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json);
+                .createParser(registry, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json);
         final ExpressionRoleMapping mapping = ExpressionRoleMapping.parse(name, parser);
         assertThat(mapping, notNullValue());
         assertThat(mapping.getName(), equalTo(name));
         return mapping;
     }
+    private ExpressionRoleMapping randomRoleMapping(boolean acceptRoleTemplates) {
+        final boolean useTemplate = acceptRoleTemplates && randomBoolean();
+        final List<String> roles;
+        final List<TemplateRoleName> templates;
+        if (useTemplate) {
+            roles = Collections.emptyList();
+            templates = Arrays.asList(randomArray(1, 5, TemplateRoleName[]::new, () ->
+                new TemplateRoleName(new BytesArray(randomAlphaOfLengthBetween(10, 25)), randomFrom(TemplateRoleName.Format.values()))
+            ));
+        } else {
+            roles = Arrays.asList(randomArray(1, 5, String[]::new, () -> randomAlphaOfLengthBetween(4, 12)));
+            templates = Collections.emptyList();
+        }
+        return new ExpressionRoleMapping(
+            randomAlphaOfLengthBetween(3, 8),
+            new FieldExpression(randomAlphaOfLengthBetween(4, 12),
+                Collections.singletonList(new FieldExpression.FieldValue(randomInt(99)))),
+            roles,
+            templates,
+            Collections.singletonMap(randomAlphaOfLengthBetween(3, 12), randomIntBetween(30, 90)),
+            true
+        );
+    }
+
 }
diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java
index 29407a8672982..e96284ba1549a 100644
--- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java
+++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java
@@ -10,10 +10,14 @@
 import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.env.TestEnvironment;
+import org.elasticsearch.script.ScriptModule;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.script.mustache.MustacheScriptEngine;
 import
org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheAction; @@ -23,6 +27,7 @@ import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; +import org.elasticsearch.xpack.core.security.authc.support.mapper.TemplateRoleName; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression.FieldValue; import org.elasticsearch.xpack.core.security.index.RestrictedIndicesNames; @@ -54,12 +59,12 @@ public void testResolveRoles() throws Exception { // Does match DN final ExpressionRoleMapping mapping1 = new ExpressionRoleMapping("dept_h", new FieldExpression("dn", Collections.singletonList(new FieldValue("*,ou=dept_h,o=forces,dc=gc,dc=ca"))), - Arrays.asList("dept_h", "defence"), Collections.emptyMap(), true); + Arrays.asList("dept_h", "defence"), Collections.emptyList(), Collections.emptyMap(), true); // Does not match - user is not in this group final ExpressionRoleMapping mapping2 = new ExpressionRoleMapping("admin", - new FieldExpression("groups", Collections.singletonList( - new FieldValue(randomiseDn("cn=esadmin,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca")))), - Arrays.asList("admin"), Collections.emptyMap(), true); + new FieldExpression("groups", Collections.singletonList( + new FieldValue(randomiseDn("cn=esadmin,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca")))), + Arrays.asList("admin"), Collections.emptyList(), Collections.emptyMap(), true); // Does match - user is one of these groups final ExpressionRoleMapping mapping3 = new ExpressionRoleMapping("flight", new FieldExpression("groups", Arrays.asList( @@ -67,18 +72,23 @@ public void testResolveRoles() throws Exception { new FieldValue(randomiseDn("cn=betaflight,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca")), new FieldValue(randomiseDn("cn=gammaflight,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca")) )), - Arrays.asList("flight"), Collections.emptyMap(), true); + Collections.emptyList(), + Arrays.asList(new TemplateRoleName(new BytesArray("{ \"source\":\"{{metadata.extra_group}}\" }"), + TemplateRoleName.Format.STRING)), + Collections.emptyMap(), true); // Does not match - mapping is not enabled final ExpressionRoleMapping mapping4 = new ExpressionRoleMapping("mutants", new FieldExpression("groups", Collections.singletonList( new FieldValue(randomiseDn("cn=mutants,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca")))), - Arrays.asList("mutants"), Collections.emptyMap(), false); + Arrays.asList("mutants"), Collections.emptyList(), Collections.emptyMap(), false); final Client client = mock(Client.class); SecurityIndexManager securityIndex = mock(SecurityIndexManager.class); + ScriptService scriptService = new ScriptService(Settings.EMPTY, + Collections.singletonMap(MustacheScriptEngine.NAME, new MustacheScriptEngine()), ScriptModule.CORE_CONTEXTS); when(securityIndex.isAvailable()).thenReturn(true); - final NativeRoleMappingStore store = new NativeRoleMappingStore(Settings.EMPTY, client, securityIndex) { + final NativeRoleMappingStore store = new NativeRoleMappingStore(Settings.EMPTY, client, securityIndex, scriptService) { @Override protected void loadMappings(ActionListener> listener) { final List mappings = Arrays.asList(mapping1, mapping2, mapping3, mapping4); 
@@ -96,7 +106,7 @@ protected void loadMappings(ActionListener> listener Arrays.asList( randomiseDn("cn=alphaflight,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca"), randomiseDn("cn=mutants,ou=groups,ou=dept_h,o=forces,dc=gc,dc=ca") - ), Collections.emptyMap(), realm); + ), Collections.singletonMap("extra_group", "flight"), realm); logger.info("UserData is [{}]", user); store.resolveRoles(user, future); @@ -213,7 +223,8 @@ protected void doLookupUser(String username, ActionListener listener) { listener.onResponse(null); } }; - final NativeRoleMappingStore store = new NativeRoleMappingStore(Settings.EMPTY, client, mock(SecurityIndexManager.class)); + final NativeRoleMappingStore store = new NativeRoleMappingStore(Settings.EMPTY, client, mock(SecurityIndexManager.class), + mock(ScriptService.class)); store.refreshRealmOnChange(mockRealm); return store; } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java index 8f60b1d30523f..a833748854943 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativePrivilegeStoreTests.java @@ -181,6 +181,45 @@ public void testGetPrivilegesByApplicationName() throws Exception { assertResult(sourcePrivileges, future); } + public void testGetPrivilegesByWildcardApplicationName() throws Exception { + final PlainActionFuture> future = new PlainActionFuture<>(); + store.getPrivileges(Arrays.asList("myapp-*", "yourapp"), null, future); + assertThat(requests, iterableWithSize(1)); + assertThat(requests.get(0), instanceOf(SearchRequest.class)); + SearchRequest request = (SearchRequest) requests.get(0); + assertThat(request.indices(), arrayContaining(SecurityIndexManager.SECURITY_INDEX_NAME)); + + final String query = Strings.toString(request.source().query()); + assertThat(query, containsString("{\"bool\":{\"filter\":[{\"terms\":{\"application\":[\"yourapp\"]")); + assertThat(query, containsString("{\"prefix\":{\"application\":{\"value\":\"myapp-\"")); + assertThat(query, containsString("{\"term\":{\"type\":{\"value\":\"application-privilege\"")); + + final SearchHit[] hits = new SearchHit[0]; + listener.get().onResponse(new SearchResponse(new SearchResponseSections( + new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 0f), + null, null, false, false, null, 1), + "_scrollId1", 1, 1, 0, 1, null, null)); + } + + public void testGetPrivilegesByStarApplicationName() throws Exception { + final PlainActionFuture> future = new PlainActionFuture<>(); + store.getPrivileges(Arrays.asList("*", "anything"), null, future); + assertThat(requests, iterableWithSize(1)); + assertThat(requests.get(0), instanceOf(SearchRequest.class)); + SearchRequest request = (SearchRequest) requests.get(0); + assertThat(request.indices(), arrayContaining(SecurityIndexManager.SECURITY_INDEX_NAME)); + + final String query = Strings.toString(request.source().query()); + assertThat(query, containsString("{\"exists\":{\"field\":\"application\"")); + assertThat(query, containsString("{\"term\":{\"type\":{\"value\":\"application-privilege\"")); + + final SearchHit[] hits = new SearchHit[0]; + listener.get().onResponse(new SearchResponse(new SearchResponseSections( + new SearchHits(hits, new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), 
0f), + null, null, false, false, null, 1), + "_scrollId1", 1, 1, 0, 1, null, null)); + } + public void testGetAllPrivileges() throws Exception { final List sourcePrivileges = Arrays.asList( new ApplicationPrivilegeDescriptor("app1", "admin", newHashSet("action:admin/*", "action:login", "data:read/*"), emptyMap()), diff --git a/x-pack/plugin/sql/build.gradle b/x-pack/plugin/sql/build.gradle index e0250f8ad9ec4..31cec8bbee39f 100644 --- a/x-pack/plugin/sql/build.gradle +++ b/x-pack/plugin/sql/build.gradle @@ -14,6 +14,10 @@ ext { // SQL dependency versions jlineVersion="3.10.0" antlrVersion="4.5.3" + + // SQL test dependency versions + csvjdbcVersion="1.0.34" + h2Version="1.4.197" } configurations { diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java index 5697453730455..e69d5b0201319 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDatabaseMetaData.java @@ -257,8 +257,7 @@ public boolean nullPlusNonNullIsNull() throws SQLException { @Override public boolean supportsConvert() throws SQLException { - //TODO: add Convert - return false; + return true; } @Override @@ -774,14 +773,14 @@ public ResultSet getSchemas(String catalog, String schemaPattern) throws SQLExce @Override public ResultSet getCatalogs() throws SQLException { // TABLE_CAT is the first column - Object[][] data = queryColumn(con, "SYS TABLES CATALOG LIKE '%'", 1); + Object[][] data = queryColumn(con, "SYS TABLES CATALOG LIKE '%' LIKE ''", 1); return memorySet(con.cfg, columnInfo("", "TABLE_CAT"), data); } @Override public ResultSet getTableTypes() throws SQLException { // TABLE_TYPE (4) - Object[][] data = queryColumn(con, "SYS TABLES TYPE '%'", 4); + Object[][] data = queryColumn(con, "SYS TABLES CATALOG LIKE '' LIKE '' TYPE '%'", 4); return memorySet(con.cfg, columnInfo("", "TABLE_TYPE"), data); } diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDateUtils.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDateUtils.java index b4210f2c44d3f..c0f2e6e46ea03 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDateUtils.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/JdbcDateUtils.java @@ -9,6 +9,7 @@ import java.sql.Date; import java.sql.Time; import java.sql.Timestamp; +import java.time.LocalDate; import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; import java.time.format.DateTimeFormatterBuilder; @@ -27,10 +28,9 @@ */ final class JdbcDateUtils { - private JdbcDateUtils() { - } + private JdbcDateUtils() {} - private static final long DAY_IN_MILLIS = 60 * 60 * 24 * 1000L; + private static final LocalDate EPOCH = LocalDate.of(1970, 1, 1); static final DateTimeFormatter ISO_WITH_MILLIS = new DateTimeFormatterBuilder() .parseCaseInsensitive() @@ -58,20 +58,9 @@ static Date asDate(String date) { return new Date(zdt.toLocalDate().atStartOfDay(zdt.getZone()).toInstant().toEpochMilli()); } - /** - * In contrast to {@link JdbcDateUtils#asDate(String)} here we just want to eliminate - * the date part and just set it to EPOCH (1970-01-1) - */ - static Time asTime(long millisSinceEpoch) { - return new Time(utcMillisRemoveDate(millisSinceEpoch)); - } - - /** - * In contrast to {@link 
JdbcDateUtils#asDate(String)} here we just want to eliminate - * the date part and just set it to EPOCH (1970-01-1) - */ static Time asTime(String date) { - return asTime(asMillisSinceEpoch(date)); + ZonedDateTime zdt = asDateTime(date); + return new Time(zdt.toLocalTime().atDate(EPOCH).atZone(zdt.getZone()).toInstant().toEpochMilli()); } static Timestamp asTimestamp(long millisSinceEpoch) { @@ -93,8 +82,4 @@ static R asDateTimeField(Object value, Function asDateTimeMethod, return ctor.apply(((Number) value).longValue()); } } - - private static long utcMillisRemoveDate(long l) { - return l % DAY_IN_MILLIS; - } } diff --git a/x-pack/plugin/sql/qa/build.gradle b/x-pack/plugin/sql/qa/build.gradle index a2c209f25787f..cf0a0dba8ee62 100644 --- a/x-pack/plugin/sql/qa/build.gradle +++ b/x-pack/plugin/sql/qa/build.gradle @@ -12,7 +12,7 @@ dependencies { compile project(path: xpackModule('sql:jdbc'), configuration: 'nodeps') compile project(path: xpackModule('sql:sql-action')) - compile "net.sourceforge.csvjdbc:csvjdbc:1.0.34" + compile "net.sourceforge.csvjdbc:csvjdbc:${csvjdbcVersion}" // CLI testing dependencies compile project(path: xpackModule('sql:sql-cli'), configuration: 'nodeps') @@ -54,8 +54,8 @@ subprojects { testCompile "org.elasticsearch.test:framework:${version}" // JDBC testing dependencies - testRuntime "net.sourceforge.csvjdbc:csvjdbc:1.0.34" - testRuntime "com.h2database:h2:1.4.197" + testRuntime "net.sourceforge.csvjdbc:csvjdbc:${csvjdbcVersion}" + testRuntime "com.h2database:h2:${h2Version}" testRuntime project(path: xpackModule('sql:jdbc'), configuration: 'nodeps') testRuntime xpackProject('plugin:sql:sql-client') diff --git a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/RestSqlSecurityIT.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/RestSqlSecurityIT.java index d5e7e7cc5084c..6a4a2662810e3 100644 --- a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/RestSqlSecurityIT.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/RestSqlSecurityIT.java @@ -29,10 +29,10 @@ import java.util.Map; import java.util.stream.Collectors; +import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.SQL_QUERY_REST_ENDPOINT; import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.columnInfo; import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.mode; import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.randomMode; -import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.SQL_QUERY_REST_ENDPOINT; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -101,9 +101,9 @@ public void expectDescribe(Map> columns, String user) throw String mode = randomMode(); Map expected = new HashMap<>(3); expected.put("columns", Arrays.asList( - columnInfo(mode, "column", "keyword", JDBCType.VARCHAR, 0), - columnInfo(mode, "type", "keyword", JDBCType.VARCHAR, 0), - columnInfo(mode, "mapping", "keyword", JDBCType.VARCHAR, 0))); + columnInfo(mode, "column", "keyword", JDBCType.VARCHAR, 32766), + columnInfo(mode, "type", "keyword", JDBCType.VARCHAR, 32766), + columnInfo(mode, "mapping", "keyword", JDBCType.VARCHAR, 32766))); List> rows = new ArrayList<>(columns.size()); for (Map.Entry> column : columns.entrySet()) { List cols = new ArrayList<>(); @@ -120,8 +120,8 @@ public void expectDescribe(Map> columns, String user) throw 
public void expectShowTables(List tables, String user) throws Exception { String mode = randomMode(); List columns = new ArrayList<>(); - columns.add(columnInfo(mode, "name", "keyword", JDBCType.VARCHAR, 0)); - columns.add(columnInfo(mode, "type", "keyword", JDBCType.VARCHAR, 0)); + columns.add(columnInfo(mode, "name", "keyword", JDBCType.VARCHAR, 32766)); + columns.add(columnInfo(mode, "type", "keyword", JDBCType.VARCHAR, 32766)); Map expected = new HashMap<>(); expected.put("columns", columns); List> rows = new ArrayList<>(); diff --git a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/UserFunctionIT.java b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/UserFunctionIT.java index 2d93597efc108..65eb991280ff1 100644 --- a/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/UserFunctionIT.java +++ b/x-pack/plugin/sql/qa/security/src/test/java/org/elasticsearch/xpack/sql/qa/security/UserFunctionIT.java @@ -34,10 +34,10 @@ import java.util.List; import java.util.Map; +import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.SQL_QUERY_REST_ENDPOINT; import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.columnInfo; import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.mode; import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.randomMode; -import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.SQL_QUERY_REST_ENDPOINT; public class UserFunctionIT extends ESRestTestCase { @@ -81,7 +81,7 @@ public void testSingleRandomUser() throws IOException { Map expected = new HashMap<>(); expected.put("columns", Arrays.asList( - columnInfo(mode, "USER()", "keyword", JDBCType.VARCHAR, 0))); + columnInfo(mode, "USER()", "keyword", JDBCType.VARCHAR, 32766))); expected.put("rows", Arrays.asList(Arrays.asList(randomUserName))); Map actual = runSql(randomUserName, mode, SQL); @@ -97,7 +97,7 @@ public void testSingleRandomUserWithWhereEvaluatingTrue() throws IOException { Map expected = new HashMap<>(); expected.put("columns", Arrays.asList( - columnInfo(mode, "USER()", "keyword", JDBCType.VARCHAR, 0))); + columnInfo(mode, "USER()", "keyword", JDBCType.VARCHAR, 32766))); expected.put("rows", Arrays.asList(Arrays.asList(randomUserName), Arrays.asList(randomUserName), Arrays.asList(randomUserName))); @@ -114,7 +114,7 @@ public void testSingleRandomUserWithWhereEvaluatingFalse() throws IOException { Map expected = new HashMap<>(); expected.put("columns", Arrays.asList( - columnInfo(mode, "USER()", "keyword", JDBCType.VARCHAR, 0))); + columnInfo(mode, "USER()", "keyword", JDBCType.VARCHAR, 32766))); expected.put("rows", Collections.>emptyList()); String anotherRandomUserName = randomValueOtherThan(randomUserName, () -> randomAlphaOfLengthBetween(1, 15)); Map actual = runSql(randomUserName, mode, SQL + " FROM test WHERE USER()='" + anotherRandomUserName + "' LIMIT 3"); @@ -129,7 +129,7 @@ public void testMultipleRandomUsersAccess() throws IOException { Map expected = new HashMap<>(); expected.put("columns", Arrays.asList( - columnInfo(mode, "USER()", "keyword", JDBCType.VARCHAR, 0))); + columnInfo(mode, "USER()", "keyword", JDBCType.VARCHAR, 32766))); expected.put("rows", Arrays.asList(Arrays.asList(randomlyPickedUsername))); Map actual = runSql(randomlyPickedUsername, mode, SQL); @@ -147,7 +147,7 @@ public void testSingleUserSelectFromIndex() throws IOException { Map expected = new HashMap<>(); expected.put("columns", Arrays.asList( - columnInfo(mode, "USER()", 
"keyword", JDBCType.VARCHAR, 0))); + columnInfo(mode, "USER()", "keyword", JDBCType.VARCHAR, 32766))); expected.put("rows", Arrays.asList(Arrays.asList(randomUserName), Arrays.asList(randomUserName), Arrays.asList(randomUserName))); diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/SqlProtocolTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/SqlProtocolTestCase.java index 233c4b6a42024..1a47bb0add85b 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/SqlProtocolTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/SqlProtocolTestCase.java @@ -28,10 +28,10 @@ import java.util.Locale; import java.util.Map; +import static org.elasticsearch.xpack.sql.proto.Mode.CLI; import static org.elasticsearch.xpack.sql.proto.Protocol.SQL_QUERY_REST_ENDPOINT; import static org.elasticsearch.xpack.sql.proto.RequestInfo.CLIENT_IDS; import static org.elasticsearch.xpack.sql.qa.rest.RestSqlTestCase.mode; -import static org.elasticsearch.xpack.sql.proto.Mode.CLI; public abstract class SqlProtocolTestCase extends ESRestTestCase { @@ -62,7 +62,7 @@ public void testNumericTypes() throws IOException { } public void testTextualType() throws IOException { - assertQuery("SELECT 'abc123'", "'abc123'", "keyword", "abc123", 0); + assertQuery("SELECT 'abc123'", "'abc123'", "keyword", "abc123", 32766); } public void testDateTimes() throws IOException { @@ -141,7 +141,7 @@ private void assertQuery(String sql, String columnName, String columnType, Objec List row = (ArrayList) rows.get(0); assertEquals(1, row.size()); - // from xcontent we can get float or double, depending on the conversion + // from xcontent we can get float or double, depending on the conversion // method of the specific xcontent format implementation if (columnValue instanceof Float && row.get(0) instanceof Double) { assertEquals(columnValue, (float)((Number) row.get(0)).doubleValue()); @@ -209,7 +209,7 @@ private Map runSql(String mode, String sql, boolean columnar) th return XContentHelper.convertToMap(SmileXContent.smileXContent, content, false); } default: - return XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false); + return XContentHelper.convertToMap(JsonXContent.jsonXContent, content, false); } } } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java index 6896c76aff004..19c30b55e92b1 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/JdbcTestUtils.java @@ -23,10 +23,13 @@ import java.nio.file.Path; import java.nio.file.SimpleFileVisitor; import java.nio.file.attribute.BasicFileAttributes; +import java.sql.Date; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.SQLException; +import java.sql.Time; import java.time.Instant; +import java.time.LocalDate; import java.time.ZoneId; import java.time.ZonedDateTime; import java.util.ArrayList; @@ -37,15 +40,17 @@ import static org.elasticsearch.xpack.sql.action.BasicFormatter.FormatOption.CLI; -public abstract class JdbcTestUtils { +final class JdbcTestUtils { - public static final String SQL_TRACE = "org.elasticsearch.xpack.sql:TRACE"; + private JdbcTestUtils() {} - public static final String JDBC_TIMEZONE = "timezone"; - - public static ZoneId UTC = ZoneId.of("Z"); + 
private static final int MAX_WIDTH = 20; + + static final String SQL_TRACE = "org.elasticsearch.xpack.sql:TRACE"; + static final String JDBC_TIMEZONE = "timezone"; + static final LocalDate EPOCH = LocalDate.of(1970, 1, 1); - public static void logResultSetMetadata(ResultSet rs, Logger logger) throws SQLException { + static void logResultSetMetadata(ResultSet rs, Logger logger) throws SQLException { ResultSetMetaData metaData = rs.getMetaData(); // header StringBuilder sb = new StringBuilder(); @@ -75,35 +80,24 @@ public static void logResultSetMetadata(ResultSet rs, Logger logger) throws SQLE logger.info(sb.toString()); } - private static final int MAX_WIDTH = 20; - - public static void logResultSetData(ResultSet rs, Logger log) throws SQLException { + static void logResultSetData(ResultSet rs, Logger log) throws SQLException { ResultSetMetaData metaData = rs.getMetaData(); - StringBuilder sb = new StringBuilder(); - StringBuilder column = new StringBuilder(); int columns = metaData.getColumnCount(); while (rs.next()) { - sb.setLength(0); - for (int i = 1; i <= columns; i++) { - column.setLength(0); - if (i > 1) { - sb.append(" | "); - } - sb.append(trimOrPad(column.append(rs.getString(i)))); - } - log.info(sb); + log.info(rowAsString(rs, columns)); } } - public static String resultSetCurrentData(ResultSet rs) throws SQLException { + static String resultSetCurrentData(ResultSet rs) throws SQLException { ResultSetMetaData metaData = rs.getMetaData(); - StringBuilder column = new StringBuilder(); - - int columns = metaData.getColumnCount(); + return rowAsString(rs, metaData.getColumnCount()); + } + private static String rowAsString(ResultSet rs, int columns) throws SQLException { StringBuilder sb = new StringBuilder(); + StringBuilder column = new StringBuilder(); for (int i = 1; i <= columns; i++) { column.setLength(0); if (i > 1) { @@ -153,7 +147,7 @@ public static void logLikeCLI(ResultSet rs, Logger logger) throws SQLException { logger.info("\n" + formatter.formatWithHeader(cols, data)); } - public static String of(long millis, String zoneId) { + static String of(long millis, String zoneId) { return StringUtils.toString(ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), ZoneId.of(zoneId))); } @@ -165,7 +159,7 @@ public static String of(long millis, String zoneId) { * folders in the file-system (typically IDEs) or * inside jars (gradle). 
*/ - public static List classpathResources(String pattern) throws Exception { + static List classpathResources(String pattern) throws Exception { while (pattern.startsWith("/")) { pattern = pattern.substring(1); } @@ -234,4 +228,15 @@ static Tuple pathAndName(String string) { } return new Tuple<>(folder, file); } -} \ No newline at end of file + + static Date asDate(long millis, ZoneId zoneId) { + return new java.sql.Date( + ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), zoneId) + .toLocalDate().atStartOfDay(zoneId).toInstant().toEpochMilli()); + } + + static Time asTime(long millis, ZoneId zoneId) { + return new Time(ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), zoneId) + .toLocalTime().atDate(JdbcTestUtils.EPOCH).atZone(zoneId).toInstant().toEpochMilli()); + } +} diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java index f9bc90a093e69..b8cd81e39f545 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/jdbc/ResultSetTestCase.java @@ -34,7 +34,6 @@ import java.sql.Types; import java.time.Instant; import java.time.ZoneId; -import java.time.ZonedDateTime; import java.util.Arrays; import java.util.Calendar; import java.util.Date; @@ -61,6 +60,8 @@ import static java.util.Calendar.SECOND; import static java.util.Calendar.YEAR; import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcTestUtils.JDBC_TIMEZONE; +import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcTestUtils.asDate; +import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcTestUtils.asTime; import static org.elasticsearch.xpack.sql.qa.jdbc.JdbcTestUtils.of; public class ResultSetTestCase extends JdbcIntegrationTestCase { @@ -880,10 +881,7 @@ public void testGettingDateWithoutCalendar() throws Exception { doWithQuery(SELECT_ALL_FIELDS, (results) -> { results.next(); - ZoneId zoneId = ZoneId.of(timeZoneId); - java.sql.Date expectedDate = new java.sql.Date( - ZonedDateTime.ofInstant(Instant.ofEpochMilli(randomLongDate), zoneId) - .toLocalDate().atStartOfDay(zoneId).toInstant().toEpochMilli()); + java.sql.Date expectedDate = asDate(randomLongDate, getZoneFromOffset(randomLongDate)); assertEquals(expectedDate, results.getDate("test_date")); assertEquals(expectedDate, results.getDate(9)); @@ -939,11 +937,11 @@ public void testGettingTimeWithoutCalendar() throws Exception { }); Long randomLongDate = randomNonNegativeLong(); indexSimpleDocumentWithTrueValues(randomLongDate); - + doWithQuery(SELECT_ALL_FIELDS, (results) -> { results.next(); - java.sql.Time expectedTime = new java.sql.Time(randomLongDate % 86400000L); + java.sql.Time expectedTime = asTime(randomLongDate, getZoneFromOffset(randomLongDate)); assertEquals(expectedTime, results.getTime("test_date")); assertEquals(expectedTime, results.getTime(9)); @@ -953,7 +951,7 @@ public void testGettingTimeWithoutCalendar() throws Exception { validateErrorsForTimeTestsWithoutCalendar(results::getTime); }); } - + public void testGettingTimeWithCalendar() throws Exception { createIndex("test"); updateMappingForNumericValuesTests("test"); @@ -1748,4 +1746,8 @@ private Connection esWithLeniency(boolean multiValueLeniency) throws SQLExceptio private String asDateString(long millis) { return of(millis, timeZoneId); } + + private ZoneId getZoneFromOffset(Long randomLongDate) { + return 
ZoneId.of(ZoneId.of(timeZoneId).getRules().getOffset(Instant.ofEpochMilli(randomLongDate)).toString()); + } } diff --git a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java index afcabeedf59ee..c88f31bb2fd71 100644 --- a/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java +++ b/x-pack/plugin/sql/qa/src/main/java/org/elasticsearch/xpack/sql/qa/rest/RestSqlTestCase.java @@ -74,7 +74,7 @@ public void testBasicQuery() throws IOException { String mode = randomMode(); boolean columnar = randomBoolean(); - expected.put("columns", singletonList(columnInfo(mode, "test", "text", JDBCType.VARCHAR, 0))); + expected.put("columns", singletonList(columnInfo(mode, "test", "text", JDBCType.VARCHAR, Integer.MAX_VALUE))); if (columnar) { expected.put("values", singletonList(Arrays.asList("test", "test"))); } else { @@ -118,7 +118,7 @@ public void testNextPage() throws IOException { Map expected = new HashMap<>(); if (i == 0) { expected.put("columns", Arrays.asList( - columnInfo(mode, "text", "text", JDBCType.VARCHAR, 0), + columnInfo(mode, "text", "text", JDBCType.VARCHAR, Integer.MAX_VALUE), columnInfo(mode, "number", "long", JDBCType.BIGINT, 20), columnInfo(mode, "s", "double", JDBCType.DOUBLE, 25), columnInfo(mode, "SCORE()", "float", JDBCType.REAL, 15))); @@ -184,7 +184,7 @@ public void testScoreWithFieldNamedScore() throws IOException { Map expected = new HashMap<>(); boolean columnar = randomBoolean(); expected.put("columns", Arrays.asList( - columnInfo(mode, "name", "text", JDBCType.VARCHAR, 0), + columnInfo(mode, "name", "text", JDBCType.VARCHAR, Integer.MAX_VALUE), columnInfo(mode, "score", "long", JDBCType.BIGINT, 20), columnInfo(mode, "SCORE()", "float", JDBCType.REAL, 15))); if (columnar) { @@ -427,7 +427,7 @@ public void testBasicQueryWithFilter() throws IOException { "{\"test\":\"bar\"}"); Map expected = new HashMap<>(); - expected.put("columns", singletonList(columnInfo(mode, "test", "text", JDBCType.VARCHAR, 0))); + expected.put("columns", singletonList(columnInfo(mode, "test", "text", JDBCType.VARCHAR, Integer.MAX_VALUE))); expected.put("rows", singletonList(singletonList("foo"))); assertResponse(expected, runSql(new StringEntity("{\"query\":\"SELECT * FROM test\", " + "\"filter\":{\"match\": {\"test\": \"foo\"}}" + mode(mode) + "}", @@ -442,7 +442,7 @@ public void testBasicQueryWithParameters() throws IOException { Map expected = new HashMap<>(); expected.put("columns", Arrays.asList( - columnInfo(mode, "test", "text", JDBCType.VARCHAR, 0), + columnInfo(mode, "test", "text", JDBCType.VARCHAR, Integer.MAX_VALUE), columnInfo(mode, "param", "integer", JDBCType.INTEGER, 11) )); if (columnar) { diff --git a/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec index 41a82ac0f84bb..820c358ab2f62 100644 --- a/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec +++ b/x-pack/plugin/sql/qa/src/main/resources/docs/docs.csv-spec @@ -929,6 +929,84 @@ null |25324 // end::orderByAgg ; +simpleMatch +// tag::simpleMatch +SELECT author, name FROM library WHERE MATCH(author, 'frank'); + + author | name +---------------+------------------- +Frank Herbert |Dune +Frank Herbert |Dune Messiah +Frank Herbert |Children of Dune +Frank Herbert |God Emperor of Dune + +// end::simpleMatch +; + +multiFieldsMatch +// tag::multiFieldsMatch +SELECT author, name, SCORE() FROM 
library WHERE MATCH('author^2,name^5', 'frank dune');
+
+ author | name | SCORE()
+---------------+-------------------+---------------
+Frank Herbert |Dune |11.443176
+Frank Herbert |Dune Messiah |9.446629
+Frank Herbert |Children of Dune |8.043278
+Frank Herbert |God Emperor of Dune|7.0029488
+
+// end::multiFieldsMatch
+;
+
+optionalParamsForMatch
+// tag::optionalParamsForMatch
+SELECT author, name, SCORE() FROM library WHERE MATCH(name, 'to the star', 'operator=or;cutoff_frequency=0.2');
+
+ author | name | SCORE()
+-----------------+------------------------------------+---------------
+Peter F. Hamilton|Pandora's Star |3.0997515
+Douglas Adams |The Hitchhiker's Guide to the Galaxy|3.1756816
+
+// end::optionalParamsForMatch
+;
+
+simpleQueryQuery
+// tag::simpleQueryQuery
+SELECT author, name, SCORE() FROM library WHERE QUERY('name:dune');
+
+ author | name | SCORE()
+---------------+-------------------+---------------
+Frank Herbert |Dune |2.2886353
+Frank Herbert |Dune Messiah |1.8893257
+Frank Herbert |Children of Dune |1.6086556
+Frank Herbert |God Emperor of Dune|1.4005898
+// end::simpleQueryQuery
+;
+
+advancedQueryQuery
+// tag::advancedQueryQuery
+SELECT author, name, page_count, SCORE() FROM library WHERE QUERY('_exists_:"author" AND page_count:>200 AND (name:/star.*/ OR name:duna~)');
+
+ author | name | page_count | SCORE()
+------------------+-------------------+---------------+---------------
+Frank Herbert |Dune |604 |3.7164764
+Frank Herbert |Dune Messiah |331 |3.4169943
+Frank Herbert |Children of Dune |408 |3.2064917
+Frank Herbert |God Emperor of Dune|454 |3.0504425
+Peter F. Hamilton |Pandora's Star |768 |3.0
+Robert A. Heinlein|Starship Troopers |335 |3.0
+// end::advancedQueryQuery
+;
+
+optionalParameterQuery
+// tag::optionalParameterQuery
+SELECT author, name, SCORE() FROM library WHERE QUERY('dune god', 'default_operator=and;default_field=name');
+
+ author | name | SCORE()
+---------------+-------------------+---------------
+Frank Herbert |God Emperor of Dune|3.6984892
+// end::optionalParameterQuery
+;
+
 orderByScore
 // tag::orderByScore
 SELECT SCORE(), * FROM library WHERE MATCH(name, 'dune') ORDER BY SCORE() DESC;
diff --git a/x-pack/plugin/sql/qa/src/main/resources/fulltext.csv-spec b/x-pack/plugin/sql/qa/src/main/resources/fulltext.csv-spec
index 07df14d99e36b..99aa07ec91f4d 100644
--- a/x-pack/plugin/sql/qa/src/main/resources/fulltext.csv-spec
+++ b/x-pack/plugin/sql/qa/src/main/resources/fulltext.csv-spec
@@ -30,6 +30,60 @@ SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE QUERY('Man*', '
 10096 |Jayson |M |Mandell
 ;
+matchWithFuzziness
+SELECT first_name, SCORE() FROM test_emp WHERE MATCH(first_name, 'geo', 'fuzziness=6');
+
+ first_name:s | SCORE():f
+----------------+---------------
+Gino |1.3684646
+Gao |2.7369292
+;
+
+matchWithFuzzinessAuto
+SELECT first_name, SCORE() FROM test_emp WHERE MATCH(first_name, 'geo', 'fuzziness=AUTO:1,7;fuzzy_rewrite=scoring_boolean');
+
+ first_name:s | SCORE():f
+----------------+---------------
+Gao |2.7369292
+;
+
+multiMatchWithFuzzinessAuto
+SELECT first_name, last_name, SCORE() FROM test_emp WHERE MATCH('first_name^3,last_name^5', 'geo hir', 'fuzziness=AUTO:1,5;operator=or') ORDER BY first_name;
+
+ first_name:s | last_name:s | SCORE():f
+----------------+-----------------+---------------
+Gao |Dolinsky |8.210788
+Shir |McClurg |8.210788
+;
+
+multiMatchWithFuzziness
+SELECT first_name, last_name, SCORE() FROM test_emp WHERE MATCH('first_name^3,last_name^5', 'geo hir', 'fuzziness=5;operator=or')
ORDER BY first_name;
+
+ first_name:s | last_name:s | SCORE():f
+----------------+-----------------+---------------
+Gao |Dolinsky |8.210788
+Gino |Leonhardt |4.105394
+Shir |McClurg |8.210788
+Uri |Lenart |4.105394
+;
+
+queryWithFuzziness
+SELECT first_name, SCORE() FROM test_emp WHERE QUERY('geo~', 'fuzziness=5;default_field=first_name');
+
+ first_name:s | SCORE():f
+----------------+---------------
+Gino |1.3684646
+Gao |2.7369292
+;
+
+queryWithFuzzinessAuto
+SELECT first_name, SCORE() FROM test_emp WHERE QUERY('geo~', 'fuzziness=AUTO:1,5;default_field=first_name');
+
+ first_name:s | SCORE():f
+----------------+---------------
+Gao |2.7369292
+;
+
 matchQuery
 SELECT emp_no, first_name, gender, last_name FROM test_emp WHERE MATCH(first_name, 'Erez');
diff --git a/x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_columns.sql b/x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_columns.sql
index f61d48af4ff37..6292a6296ff69 100644
--- a/x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_columns.sql
+++ b/x-pack/plugin/sql/qa/src/main/resources/setup_mock_metadata_get_columns.sql
@@ -23,12 +23,12 @@ CREATE TABLE mock (
   IS_AUTOINCREMENT VARCHAR,
   IS_GENERATEDCOLUMN VARCHAR
 ) AS
-SELECT null, 'test1', 'name', 12, 'TEXT', 0, 2147483647, null, null,
+SELECT null, 'test1', 'name', 12, 'TEXT', 2147483647, 2147483647, null, null,
   1, -- columnNullable
   null, null, 12, 0, 2147483647, 1, 'YES', null, null, null, null, 'NO', 'NO'
 FROM DUAL
 UNION ALL
-SELECT null, 'test1', 'name.keyword', 12, 'KEYWORD', 0, 2147483647, null, null,
+SELECT null, 'test1', 'name.keyword', 12, 'KEYWORD', 32766, 2147483647, null, null,
   1, -- columnNullable
   null, null, 12, 0, 2147483647, 1, 'YES', null, null, null, null, 'NO', 'NO'
 FROM DUAL
diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/ColumnInfo.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/ColumnInfo.java
index ef4e603564ef6..7cf31781d9e8f 100644
--- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/ColumnInfo.java
+++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/ColumnInfo.java
@@ -119,7 +119,7 @@ public boolean equals(Object o) {
             return false;
         }
         ColumnInfo that = (ColumnInfo) o;
-        return displaySize == that.displaySize &&
+        return Objects.equals(displaySize, that.displaySize) &&
             Objects.equals(table, that.table) &&
             Objects.equals(name, that.name) &&
             Objects.equals(esType, that.esType);
diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java
index 87709ac104e08..04935023747c3 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/parser/CommandBuilder.java
@@ -149,9 +149,8 @@ public SysTables visitSysTables(SysTablesContext ctx) {
             if (value != null) {
                 // check special ODBC wildcard case
                 if (value.equals(StringUtils.SQL_WILDCARD) && ctx.string().size() == 1) {
-                    // convert % to enumeration
-                    // https://docs.microsoft.com/en-us/sql/odbc/reference/develop-app/value-list-arguments?view=ssdt-18vs2017
-                    types.addAll(IndexType.VALID);
+                    // treat % as null
+                    // https://docs.microsoft.com/en-us/sql/odbc/reference/develop-app/value-list-arguments
                 }
                 // special case for legacy apps (like msquery) that always asks for 'TABLE'
                 // which we manually map to all concrete
tables supported diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTables.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTables.java index 5ce1e6dcc8a70..53f1e1019b753 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTables.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTables.java @@ -14,9 +14,8 @@ import org.elasticsearch.xpack.sql.session.Rows; import org.elasticsearch.xpack.sql.session.SchemaRowSet; import org.elasticsearch.xpack.sql.session.SqlSession; -import org.elasticsearch.xpack.sql.tree.Source; import org.elasticsearch.xpack.sql.tree.NodeInfo; -import org.elasticsearch.xpack.sql.util.CollectionUtils; +import org.elasticsearch.xpack.sql.tree.Source; import java.util.ArrayList; import java.util.Comparator; @@ -77,8 +76,11 @@ public final void execute(SqlSession session, ActionListener liste // namely one param specified with '%', everything else empty string // https://docs.microsoft.com/en-us/sql/odbc/reference/syntax/sqltables-function?view=ssdt-18vs2017#comments - if (clusterPattern != null && clusterPattern.pattern().equals(SQL_WILDCARD)) { - if ((pattern == null || pattern.pattern().isEmpty()) && CollectionUtils.isEmpty(types)) { + // catalog enumeration + if (clusterPattern == null || clusterPattern.pattern().equals(SQL_WILDCARD)) { + // enumerate only if pattern is "" and no types are specified (types is null) + if (pattern != null && pattern.pattern().isEmpty() && index == null + && types == null) { Object[] enumeration = new Object[10]; // send only the cluster, everything else null enumeration[0] = cluster; @@ -87,12 +89,15 @@ public final void execute(SqlSession session, ActionListener liste } } - // if no types were specified (the parser takes care of the % case) - if (IndexType.VALID.equals(types)) { - if ((clusterPattern == null || clusterPattern.pattern().isEmpty()) - && (pattern == null || pattern.pattern().isEmpty())) { + // enumerate types + // if no types are specified (the parser takes care of the % case) + if (types == null) { + // empty string for catalog + if (clusterPattern != null && clusterPattern.pattern().isEmpty() + // empty string for table like and no index specified + && pattern != null && pattern.pattern().isEmpty() && index == null) { List> values = new ArrayList<>(); - // send only the types, everything else null + // send only the types, everything else is made of empty strings for (IndexType type : IndexType.VALID) { Object[] enumeration = new Object[10]; enumeration[3] = type.toSql(); @@ -105,7 +110,7 @@ public final void execute(SqlSession session, ActionListener liste } } - + // no enumeration pattern found, list actual tables String cRegex = clusterPattern != null ? 
clusterPattern.asJavaRegex() : null; // if the catalog doesn't match, don't return any results diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypes.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypes.java index 0d4ee07603161..2112128b41b00 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypes.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypes.java @@ -82,8 +82,7 @@ public final void execute(SqlSession session, ActionListener liste .sorted(Comparator.comparing((DataType t) -> t.sqlType.getVendorTypeNumber()).thenComparing(DataType::sqlName)) .map(t -> asList(t.toString(), t.sqlType.getVendorTypeNumber(), - //https://docs.microsoft.com/en-us/sql/odbc/reference/appendixes/column-size?view=sql-server-2017 - t.defaultPrecision, + DataTypes.precision(t), "'", "'", null, diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQuery.java index d0fe697268d41..7bddacb86bf74 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQuery.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MatchQuery.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.sql.querydsl.query; import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.index.query.MatchQueryBuilder; import org.elasticsearch.index.query.Operator; import org.elasticsearch.index.query.QueryBuilder; @@ -28,16 +29,17 @@ public class MatchQuery extends LeafQuery { // TODO: it'd be great if these could be constants instead of Strings, needs a core change to make the fields public first // TODO: add zero terms query support, I'm not sure the best way to parse it yet... 
// appliers.put("zero_terms_query", (qb, s) -> qb.zeroTermsQuery(s)); + appliers.put("analyzer", (qb, s) -> qb.analyzer(s)); + appliers.put("auto_generate_synonyms_phrase_query", (qb, s) -> qb.autoGenerateSynonymsPhraseQuery(Booleans.parseBoolean(s))); appliers.put("cutoff_frequency", (qb, s) -> qb.cutoffFrequency(Float.valueOf(s))); - appliers.put("lenient", (qb, s) -> qb.lenient(Booleans.parseBoolean(s))); + appliers.put("fuzziness", (qb, s) -> qb.fuzziness(Fuzziness.build(s))); appliers.put("fuzzy_transpositions", (qb, s) -> qb.fuzzyTranspositions(Booleans.parseBoolean(s))); appliers.put("fuzzy_rewrite", (qb, s) -> qb.fuzzyRewrite(s)); + appliers.put("lenient", (qb, s) -> qb.lenient(Booleans.parseBoolean(s))); + appliers.put("max_expansions", (qb, s) -> qb.maxExpansions(Integer.valueOf(s))); appliers.put("minimum_should_match", (qb, s) -> qb.minimumShouldMatch(s)); appliers.put("operator", (qb, s) -> qb.operator(Operator.fromString(s))); - appliers.put("max_expansions", (qb, s) -> qb.maxExpansions(Integer.valueOf(s))); appliers.put("prefix_length", (qb, s) -> qb.prefixLength(Integer.valueOf(s))); - appliers.put("analyzer", (qb, s) -> qb.analyzer(s)); - appliers.put("auto_generate_synonyms_phrase_query", (qb, s) -> qb.autoGenerateSynonymsPhraseQuery(Booleans.parseBoolean(s))); BUILDER_APPLIERS = Collections.unmodifiableMap(appliers); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MultiMatchQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MultiMatchQuery.java index 4f0bc0720ae83..2c6b47d7bdcc3 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MultiMatchQuery.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/MultiMatchQuery.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.sql.querydsl.query; import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.index.query.MultiMatchQueryBuilder; import org.elasticsearch.index.query.Operator; import org.elasticsearch.index.query.QueryBuilder; @@ -29,18 +30,19 @@ public class MultiMatchQuery extends LeafQuery { appliers.put("slop", (qb, s) -> qb.slop(Integer.valueOf(s))); // TODO: add zero terms query support, I'm not sure the best way to parse it yet... 
// appliers.put("zero_terms_query", (qb, s) -> qb.zeroTermsQuery(s)); - appliers.put("lenient", (qb, s) -> qb.lenient(Booleans.parseBoolean(s))); + appliers.put("analyzer", (qb, s) -> qb.analyzer(s)); + appliers.put("auto_generate_synonyms_phrase_query", (qb, s) -> qb.autoGenerateSynonymsPhraseQuery(Booleans.parseBoolean(s))); appliers.put("cutoff_frequency", (qb, s) -> qb.cutoffFrequency(Float.valueOf(s))); - appliers.put("tie_breaker", (qb, s) -> qb.tieBreaker(Float.valueOf(s))); + appliers.put("fuzziness", (qb, s) -> qb.fuzziness(Fuzziness.build(s))); appliers.put("fuzzy_rewrite", (qb, s) -> qb.fuzzyRewrite(s)); + appliers.put("fuzzy_transpositions", (qb, s) -> qb.fuzzyTranspositions(Booleans.parseBoolean(s))); + appliers.put("lenient", (qb, s) -> qb.lenient(Booleans.parseBoolean(s))); + appliers.put("max_expansions", (qb, s) -> qb.maxExpansions(Integer.valueOf(s))); appliers.put("minimum_should_match", (qb, s) -> qb.minimumShouldMatch(s)); appliers.put("operator", (qb, s) -> qb.operator(Operator.fromString(s))); - appliers.put("max_expansions", (qb, s) -> qb.maxExpansions(Integer.valueOf(s))); appliers.put("prefix_length", (qb, s) -> qb.prefixLength(Integer.valueOf(s))); - appliers.put("analyzer", (qb, s) -> qb.analyzer(s)); + appliers.put("tie_breaker", (qb, s) -> qb.tieBreaker(Float.valueOf(s))); appliers.put("type", (qb, s) -> qb.type(s)); - appliers.put("auto_generate_synonyms_phrase_query", (qb, s) -> qb.autoGenerateSynonymsPhraseQuery(Booleans.parseBoolean(s))); - appliers.put("fuzzy_transpositions", (qb, s) -> qb.fuzzyTranspositions(Booleans.parseBoolean(s))); BUILDER_APPLIERS = Collections.unmodifiableMap(appliers); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/QueryStringQuery.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/QueryStringQuery.java index de457ba918e7c..a6d8ff2dbf5fc 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/QueryStringQuery.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/querydsl/query/QueryStringQuery.java @@ -6,6 +6,7 @@ package org.elasticsearch.xpack.sql.querydsl.query; import org.elasticsearch.common.Booleans; +import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.index.query.MultiMatchQueryBuilder; import org.elasticsearch.index.query.Operator; @@ -28,30 +29,29 @@ public class QueryStringQuery extends LeafQuery { static { HashMap> appliers = new HashMap<>(28); // TODO: it'd be great if these could be constants instead of Strings, needs a core change to make the fields public first + appliers.put("allow_leading_wildcard", (qb, s) -> qb.allowLeadingWildcard(Booleans.parseBoolean(s))); + appliers.put("analyze_wildcard", (qb, s) -> qb.analyzeWildcard(Booleans.parseBoolean(s))); + appliers.put("analyzer", (qb, s) -> qb.analyzer(s)); + appliers.put("auto_generate_synonyms_phrase_query", (qb, s) -> qb.autoGenerateSynonymsPhraseQuery(Booleans.parseBoolean(s))); appliers.put("default_field", (qb, s) -> qb.defaultField(s)); appliers.put("default_operator", (qb, s) -> qb.defaultOperator(Operator.fromString(s))); - appliers.put("analyzer", (qb, s) -> qb.analyzer(s)); - appliers.put("quote_analyzer", (qb, s) -> qb.quoteAnalyzer(s)); - appliers.put("allow_leading_wildcard", (qb, s) -> qb.allowLeadingWildcard(Booleans.parseBoolean(s))); - appliers.put("max_determinized_states", (qb, s) -> qb.maxDeterminizedStates(Integer.valueOf(s))); - 
appliers.put("lowercase_expanded_terms", (qb, s) -> {}); appliers.put("enable_position_increments", (qb, s) -> qb.enablePositionIncrements(Booleans.parseBoolean(s))); appliers.put("escape", (qb, s) -> qb.escape(Booleans.parseBoolean(s))); - appliers.put("fuzzy_prefix_length", (qb, s) -> qb.fuzzyPrefixLength(Integer.valueOf(s))); + appliers.put("fuzziness", (qb, s) -> qb.fuzziness(Fuzziness.build(s))); appliers.put("fuzzy_max_expansions", (qb, s) -> qb.fuzzyMaxExpansions(Integer.valueOf(s))); + appliers.put("fuzzy_prefix_length", (qb, s) -> qb.fuzzyPrefixLength(Integer.valueOf(s))); appliers.put("fuzzy_rewrite", (qb, s) -> qb.fuzzyRewrite(s)); + appliers.put("fuzzy_transpositions", (qb, s) -> qb.fuzzyTranspositions(Booleans.parseBoolean(s))); + appliers.put("lenient", (qb, s) -> qb.lenient(Booleans.parseBoolean(s))); + appliers.put("max_determinized_states", (qb, s) -> qb.maxDeterminizedStates(Integer.valueOf(s))); + appliers.put("minimum_should_match", (qb, s) -> qb.minimumShouldMatch(s)); appliers.put("phrase_slop", (qb, s) -> qb.phraseSlop(Integer.valueOf(s))); - appliers.put("tie_breaker", (qb, s) -> qb.tieBreaker(Float.valueOf(s))); - appliers.put("analyze_wildcard", (qb, s) -> qb.analyzeWildcard(Booleans.parseBoolean(s))); appliers.put("rewrite", (qb, s) -> qb.rewrite(s)); - appliers.put("minimum_should_match", (qb, s) -> qb.minimumShouldMatch(s)); + appliers.put("quote_analyzer", (qb, s) -> qb.quoteAnalyzer(s)); appliers.put("quote_field_suffix", (qb, s) -> qb.quoteFieldSuffix(s)); - appliers.put("lenient", (qb, s) -> qb.lenient(Booleans.parseBoolean(s))); - appliers.put("locale", (qb, s) -> {}); + appliers.put("tie_breaker", (qb, s) -> qb.tieBreaker(Float.valueOf(s))); appliers.put("time_zone", (qb, s) -> qb.timeZone(s)); appliers.put("type", (qb, s) -> qb.type(MultiMatchQueryBuilder.Type.parse(s, LoggingDeprecationHandler.INSTANCE))); - appliers.put("auto_generate_synonyms_phrase_query", (qb, s) -> qb.autoGenerateSynonymsPhraseQuery(Booleans.parseBoolean(s))); - appliers.put("fuzzy_transpositions", (qb, s) -> qb.fuzzyTranspositions(Booleans.parseBoolean(s))); BUILDER_APPLIERS = Collections.unmodifiableMap(appliers); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java index f75d0a8f7352a..deeeed1c1ca16 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataType.java @@ -36,19 +36,19 @@ public enum DataType { DOUBLE( "double", JDBCType.DOUBLE, Double.BYTES, 15, 25, false, true, true), // 24 bits defaultPrecision - 24*log10(2) =~ 7 (7.22) FLOAT( "float", JDBCType.REAL, Float.BYTES, 7, 15, false, true, true), - HALF_FLOAT( "half_float", JDBCType.FLOAT, Double.BYTES, 16, 25, false, true, true), + HALF_FLOAT( "half_float", JDBCType.FLOAT, Float.BYTES, 3, 25, false, true, true), // precision is based on long - SCALED_FLOAT( "scaled_float", JDBCType.FLOAT, Double.BYTES, 19, 25, false, true, true), - KEYWORD( "keyword", JDBCType.VARCHAR, Integer.MAX_VALUE, 256, 0, false, false, true), - TEXT( "text", JDBCType.VARCHAR, Integer.MAX_VALUE, Integer.MAX_VALUE, 0, false, false, false), + SCALED_FLOAT( "scaled_float", JDBCType.DOUBLE, Long.BYTES, 15, 25, false, true, true), + KEYWORD( "keyword", JDBCType.VARCHAR, Integer.MAX_VALUE, 32766, 32766, false, false, true), + TEXT( "text", JDBCType.VARCHAR, Integer.MAX_VALUE, Integer.MAX_VALUE, Integer.MAX_VALUE, false, 
false, false), OBJECT( "object", JDBCType.STRUCT, -1, 0, 0, false, false, false), NESTED( "nested", JDBCType.STRUCT, -1, 0, 0, false, false, false), - BINARY( "binary", JDBCType.VARBINARY, -1, Integer.MAX_VALUE, 0, false, false, false), + BINARY( "binary", JDBCType.VARBINARY, -1, Integer.MAX_VALUE, Integer.MAX_VALUE, false, false, false), DATE( JDBCType.DATE, Long.BYTES, 24, 24, false, false, true), // since ODBC and JDBC interpret precision for Date as display size // the precision is 23 (number of chars in ISO8601 with millis) + Z (the UTC timezone) // see https://github.com/elastic/elasticsearch/issues/30386#issuecomment-386807288 - DATETIME( "date", JDBCType.TIMESTAMP, Long.BYTES, 24, 24, false, false, true), + DATETIME( "date", JDBCType.TIMESTAMP, Long.BYTES, 3, 24, false, false, true), // // specialized types // diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java index 5a3fa235e9a73..f8d657447923a 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/type/DataTypes.java @@ -175,7 +175,7 @@ public static Integer metaSqlDataType(DataType t) { } // https://github.com/elastic/elasticsearch/issues/30386 - // https://docs.microsoft.com/en-us/sql/odbc/reference/syntax/sqlgettypeinfo-function?view=sql-server-2017 + // https://docs.microsoft.com/en-us/sql/odbc/reference/syntax/sqlgettypeinfo-function public static Integer metaSqlDateTimeSub(DataType t) { if (t == DATETIME) { // ODBC SQL_CODE_TIMESTAMP @@ -185,37 +185,30 @@ public static Integer metaSqlDateTimeSub(DataType t) { return 0; } - // https://docs.microsoft.com/en-us/sql/odbc/reference/appendixes/decimal-digits?view=sql-server-2017 public static Short metaSqlMinimumScale(DataType t) { - // TODO: return info for HALF/SCALED_FLOATS (should be based on field not type) - if (t == DATETIME) { - return Short.valueOf((short) 3); - } - if (t.isInteger()) { - return Short.valueOf((short) 0); - } - // minimum scale? - if (t.isRational()) { - return Short.valueOf((short) 0); - } - return null; + return metaSqlSameScale(t); } public static Short metaSqlMaximumScale(DataType t) { - // TODO: return info for HALF/SCALED_FLOATS (should be based on field not type) - if (t == DATETIME) { - return Short.valueOf((short) 3); - } + return metaSqlSameScale(t); + } + + // https://docs.microsoft.com/en-us/sql/odbc/reference/appendixes/decimal-digits + // https://github.com/elastic/elasticsearch/issues/40357 + // since the scale is fixed, minimum and maximum should return the same value + // hence why this method exists + private static Short metaSqlSameScale(DataType t) { + // TODO: return info for SCALED_FLOATS (should be based on field not type) if (t.isInteger()) { return Short.valueOf((short) 0); } - if (t.isRational()) { + if (t.isDateBased() || t.isRational()) { return Short.valueOf((short) t.defaultPrecision); } return null; } - // https://docs.microsoft.com/en-us/sql/odbc/reference/syntax/sqlgettypeinfo-function?view=sql-server-2017 + // https://docs.microsoft.com/en-us/sql/odbc/reference/syntax/sqlgettypeinfo-function public static Integer metaSqlRadix(DataType t) { // RADIX - Determines how numbers returned by COLUMN_SIZE and DECIMAL_DIGITS should be interpreted. // 10 means they represent the number of decimal digits allowed for the column. 
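// Illustrative sketch, not part of the patch above: how the fixed-scale helpers shown in this
// hunk are expected to behave for a few of the DataType constants appearing in this diff.
// The wrapper class and main method are hypothetical; assertions assume the JVM runs with -ea,
// and the literal 7 below is FLOAT's defaultPrecision as declared earlier in DataType.java.
import static org.elasticsearch.xpack.sql.type.DataType.DATETIME;
import static org.elasticsearch.xpack.sql.type.DataType.FLOAT;
import static org.elasticsearch.xpack.sql.type.DataType.KEYWORD;
import static org.elasticsearch.xpack.sql.type.DataType.LONG;
import static org.elasticsearch.xpack.sql.type.DataTypes.metaSqlMaximumScale;
import static org.elasticsearch.xpack.sql.type.DataTypes.metaSqlMinimumScale;

class FixedScaleExample {
    public static void main(String[] args) {
        // integer types carry no fraction digits, so both bounds are zero
        assert metaSqlMinimumScale(LONG).equals((short) 0);
        assert metaSqlMaximumScale(LONG).equals((short) 0);
        // date-based and rational types report their fixed default precision as the scale,
        // so minimum and maximum are identical: 3 for DATETIME (milliseconds), 7 for FLOAT
        assert metaSqlMinimumScale(DATETIME).equals((short) 3);
        assert metaSqlMaximumScale(DATETIME).equals((short) 3);
        assert metaSqlMinimumScale(FLOAT).equals((short) 7);
        assert metaSqlMaximumScale(FLOAT).equals((short) 7);
        // scale is not applicable to non-numeric, non-date types such as KEYWORD
        assert metaSqlMinimumScale(KEYWORD) == null;
    }
}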
@@ -223,4 +216,13 @@ public static Integer metaSqlRadix(DataType t) { // null means radix is not applicable for the given type. return t.isInteger() ? Integer.valueOf(10) : (t.isRational() ? Integer.valueOf(2) : null); } + + //https://docs.microsoft.com/en-us/sql/odbc/reference/syntax/sqlgettypeinfo-function#comments + //https://docs.microsoft.com/en-us/sql/odbc/reference/appendixes/column-size + public static Integer precision(DataType t) { + if (t.isNumeric()) { + return t.defaultPrecision; + } + return t.displaySize; + } } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlActionIT.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlActionIT.java index 60047fcdbe799..5f85ff90e344c 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlActionIT.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/action/SqlActionIT.java @@ -33,7 +33,7 @@ public void testSqlAction() { assertThat(response.columns(), hasSize(2)); int dataIndex = dataBeforeCount ? 0 : 1; int countIndex = dataBeforeCount ? 1 : 0; - assertEquals(new ColumnInfo("", "data", "text", 0), response.columns().get(dataIndex)); + assertEquals(new ColumnInfo("", "data", "text", 2147483647), response.columns().get(dataIndex)); assertEquals(new ColumnInfo("", "count", "long", 20), response.columns().get(countIndex)); assertThat(response.rows(), hasSize(2)); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysParserTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysParserTests.java index a1accd28ab4d9..c6c993967dd1b 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysParserTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysParserTests.java @@ -60,7 +60,7 @@ private Tuple sql(String sql) { public void testSysTypes() throws Exception { Command cmd = sql("SYS TYPES").v1(); - List names = asList("BYTE", "LONG", "BINARY", "NULL", "INTEGER", "SHORT", "HALF_FLOAT", "SCALED_FLOAT", "FLOAT", "DOUBLE", + List names = asList("BYTE", "LONG", "BINARY", "NULL", "INTEGER", "SHORT", "HALF_FLOAT", "FLOAT", "DOUBLE", "SCALED_FLOAT", "KEYWORD", "TEXT", "IP", "BOOLEAN", "DATE", "DATETIME", "INTERVAL_YEAR", "INTERVAL_MONTH", "INTERVAL_DAY", "INTERVAL_HOUR", "INTERVAL_MINUTE", "INTERVAL_SECOND", "INTERVAL_YEAR_TO_MONTH", "INTERVAL_DAY_TO_HOUR", "INTERVAL_DAY_TO_MINUTE", "INTERVAL_DAY_TO_SECOND", diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java index 74ecdc80c12d6..e2baeb2d8af98 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTablesTests.java @@ -51,20 +51,60 @@ public class SysTablesTests extends ESTestCase { private final IndexInfo index = new IndexInfo("test", IndexType.INDEX); private final IndexInfo alias = new IndexInfo("alias", IndexType.ALIAS); - public void testSysTablesEnumerateCatalog() throws Exception { - executeCommand("SYS TABLES CATALOG LIKE '%'", r -> { + // + // catalog enumeration + // + public void testSysTablesCatalogEnumeration() throws Exception { + executeCommand("SYS TABLES CATALOG LIKE '%' LIKE ''", r -> 
{ assertEquals(1, r.size()); assertEquals(CLUSTER_NAME, r.column(0)); - }); + // everything else should be null + for (int i = 1; i < 10; i++) { + assertNull(r.column(i)); + } + }, index); + } + + // + // table types enumeration + // + public void testSysTablesTypesEnumerationWoString() throws Exception { + executeCommand("SYS TABLES CATALOG LIKE '' LIKE '' ", r -> { + assertEquals(2, r.size()); + assertEquals("BASE TABLE", r.column(3)); + assertTrue(r.advanceRow()); + assertEquals("VIEW", r.column(3)); + }, new IndexInfo[0]); } public void testSysTablesEnumerateTypes() throws Exception { - executeCommand("SYS TABLES TYPE '%'", r -> { + executeCommand("SYS TABLES CATALOG LIKE '' LIKE '' TYPE '%'", r -> { assertEquals(2, r.size()); assertEquals("BASE TABLE", r.column(3)); assertTrue(r.advanceRow()); assertEquals("VIEW", r.column(3)); - }); + }, alias, index); + } + + public void testSysTablesTypesEnumeration() throws Exception { + executeCommand("SYS TABLES CATALOG LIKE '' LIKE '' TYPE '%'", r -> { + assertEquals(2, r.size()); + + Iterator it = IndexType.VALID.stream().sorted(Comparator.comparing(IndexType::toSql)).iterator(); + + for (int t = 0; t < r.size(); t++) { + assertEquals(it.next().toSql(), r.column(3)); + + // everything else should be null + for (int i = 0; i < 10; i++) { + if (i != 3) { + assertNull(r.column(i)); + } + } + + r.advanceRow(); + } + }, new IndexInfo[0]); } public void testSysTablesDifferentCatalog() throws Exception { @@ -77,17 +117,42 @@ public void testSysTablesDifferentCatalog() throws Exception { public void testSysTablesNoTypes() throws Exception { executeCommand("SYS TABLES", r -> { assertEquals(2, r.size()); + assertEquals("test", r.column(2)); + assertEquals("BASE TABLE", r.column(3)); + assertTrue(r.advanceRow()); + assertEquals("alias", r.column(2)); + assertEquals("VIEW", r.column(3)); + }, index, alias); + } + + public void testSysTablesWithLegacyTypes() throws Exception { + executeCommand("SYS TABLES TYPE 'TABLE', 'ALIAS'", r -> { + assertEquals(2, r.size()); + assertEquals("test", r.column(2)); + assertEquals("TABLE", r.column(3)); + assertTrue(r.advanceRow()); + assertEquals("alias", r.column(2)); + assertEquals("VIEW", r.column(3)); + }, index, alias); + } + + public void testSysTablesWithProperTypes() throws Exception { + executeCommand("SYS TABLES TYPE 'BASE TABLE', 'ALIAS'", r -> { + assertEquals(2, r.size()); + assertEquals("test", r.column(2)); assertEquals("BASE TABLE", r.column(3)); assertTrue(r.advanceRow()); + assertEquals("alias", r.column(2)); assertEquals("VIEW", r.column(3)); }, index, alias); } public void testSysTablesPattern() throws Exception { executeCommand("SYS TABLES LIKE '%'", r -> { + assertEquals(2, r.size()); assertEquals("test", r.column(2)); + assertEquals("BASE TABLE", r.column(3)); assertTrue(r.advanceRow()); - assertEquals(2, r.size()); assertEquals("alias", r.column(2)); }, index, alias); } @@ -130,7 +195,18 @@ public void testSysTablesOnlyIndicesInLegacyMode() throws Exception { assertEquals("test", r.column(2)); assertEquals("TABLE", r.column(3)); }, index); + } + + public void testSysTablesNoPatternWithTypesSpecifiedInLegacyMode() throws Exception { + executeCommand("SYS TABLES TYPE 'TABLE','VIEW'", r -> { + assertEquals(2, r.size()); + assertEquals("test", r.column(2)); + assertEquals("TABLE", r.column(3)); + assertTrue(r.advanceRow()); + assertEquals("alias", r.column(2)); + assertEquals("VIEW", r.column(3)); + }, index, alias); } public void testSysTablesOnlyIndicesLegacyModeParameterized() throws Exception { @@ 
-192,43 +268,6 @@ public void testSysTablesWithInvalidType() throws Exception { }, new IndexInfo[0]); } - public void testSysTablesCatalogEnumeration() throws Exception { - executeCommand("SYS TABLES CATALOG LIKE '%' LIKE ''", r -> { - assertEquals(1, r.size()); - assertEquals(CLUSTER_NAME, r.column(0)); - // everything else should be null - for (int i = 1; i < 10; i++) { - assertNull(r.column(i)); - } - }, new IndexInfo[0]); - } - - public void testSysTablesTypesEnumeration() throws Exception { - executeCommand("SYS TABLES CATALOG LIKE '' LIKE '' TYPE '%'", r -> { - assertEquals(2, r.size()); - - Iterator it = IndexType.VALID.stream().sorted(Comparator.comparing(IndexType::toSql)).iterator(); - - for (int t = 0; t < r.size(); t++) { - assertEquals(it.next().toSql(), r.column(3)); - - // everything else should be null - for (int i = 0; i < 10; i++) { - if (i != 3) { - assertNull(r.column(i)); - } - } - - r.advanceRow(); - } - }, new IndexInfo[0]); - } - - public void testSysTablesTypesEnumerationWoString() throws Exception { - executeCommand("SYS TABLES CATALOG LIKE '' LIKE '' ", r -> { - assertEquals(0, r.size()); - }, new IndexInfo[0]); - } private SqlTypedParamValue param(Object value) { return new SqlTypedParamValue(DataTypes.fromJava(value).typeName, value); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypesTests.java index 4e428846dc2f4..9c1ef31fcb170 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysTypesTests.java @@ -43,7 +43,7 @@ private Tuple sql(String sql) { public void testSysTypes() { Command cmd = sql("SYS TYPES").v1(); - List names = asList("BYTE", "LONG", "BINARY", "NULL", "INTEGER", "SHORT", "HALF_FLOAT", "SCALED_FLOAT", "FLOAT", "DOUBLE", + List names = asList("BYTE", "LONG", "BINARY", "NULL", "INTEGER", "SHORT", "HALF_FLOAT", "FLOAT", "DOUBLE", "SCALED_FLOAT", "KEYWORD", "TEXT", "IP", "BOOLEAN", "DATE", "DATETIME", "INTERVAL_YEAR", "INTERVAL_MONTH", "INTERVAL_DAY", "INTERVAL_HOUR", "INTERVAL_MINUTE", "INTERVAL_SECOND", "INTERVAL_YEAR_TO_MONTH", "INTERVAL_DAY_TO_HOUR", "INTERVAL_DAY_TO_MINUTE", "INTERVAL_DAY_TO_SECOND", diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypesTests.java index 47f01be917867..a789324e0b478 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/DataTypesTests.java @@ -55,7 +55,7 @@ public void testMetaDateTypeSub() { public void testMetaMinimumScale() { assertEquals(Short.valueOf((short) 3), metaSqlMinimumScale(DATETIME)); assertEquals(Short.valueOf((short) 0), metaSqlMinimumScale(LONG)); - assertEquals(Short.valueOf((short) 0), metaSqlMinimumScale(FLOAT)); + assertEquals(Short.valueOf((short) FLOAT.defaultPrecision), metaSqlMaximumScale(FLOAT)); assertNull(metaSqlMinimumScale(KEYWORD)); } diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java index 2a2488dda722f..65b491fe71a1d 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java +++ 
b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/type/TypesTests.java @@ -81,7 +81,7 @@ public void testDateField() { EsField field = mapping.get("date"); assertThat(field.getDataType(), is(DATETIME)); assertThat(field.isAggregatable(), is(true)); - assertThat(field.getPrecision(), is(24)); + assertThat(field.getPrecision(), is(3)); } public void testDateNoFormat() { diff --git a/x-pack/plugin/sql/src/test/resources/mapping-nested.json b/x-pack/plugin/sql/src/test/resources/mapping-nested.json index d9b6398458f14..1251d17525a00 100644 --- a/x-pack/plugin/sql/src/test/resources/mapping-nested.json +++ b/x-pack/plugin/sql/src/test/resources/mapping-nested.json @@ -10,8 +10,7 @@ "type" : "text", "fields" : { "keyword" : { - "type" : "keyword", - "ignore_above" : 256 + "type" : "keyword" } } }, diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/security.put_role_mapping.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/security.put_role_mapping.json index 626ff0d6da80c..d65cf8f835833 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/api/security.put_role_mapping.json +++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/security.put_role_mapping.json @@ -23,7 +23,7 @@ } }, "body": { - "description" : "The role to add", + "description" : "The role mapping to add", "required" : true } } diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/20_has_application_privs.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/20_has_application_privs.yml index 85ac286c3f025..eb92cc252b560 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/20_has_application_privs.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/privileges/20_has_application_privs.yml @@ -28,6 +28,16 @@ setup: "name": "write", "actions": [ "data:write/*" ] } + }, + "yourapp-v1" : { + "read": { + "actions": [ "data:read/*" ] + } + }, + "yourapp-v2" : { + "read": { + "actions": [ "data:read/*" ] + } } } @@ -83,6 +93,21 @@ setup: } ] } + - do: + security.put_role: + name: "yourapp_read_config" + body: > + { + "cluster": [], + "indices": [], + "applications": [ + { + "application": "yourapp-*", + "privileges": ["read"], + "resources": ["settings/*"] + } + ] + } # And a user for each role - do: @@ -101,6 +126,14 @@ setup: "password": "p@ssw0rd", "roles" : [ "myapp_engineering_write" ] } + - do: + security.put_user: + username: "your_read" + body: > + { + "password": "p@ssw0rd", + "roles" : [ "yourapp_read_config" ] + } --- teardown: @@ -109,6 +142,16 @@ teardown: application: myapp name: "user,read,write" ignore: 404 + - do: + security.delete_privileges: + application: yourapp-v1 + name: "read" + ignore: 404 + - do: + security.delete_privileges: + application: yourapp-v2 + name: "read" + ignore: 404 - do: security.delete_user: @@ -120,6 +163,11 @@ teardown: username: "eng_write" ignore: 404 + - do: + security.delete_user: + username: "your_read" + ignore: 404 + - do: security.delete_role: name: "myapp_engineering_read" @@ -129,6 +177,11 @@ teardown: security.delete_role: name: "myapp_engineering_write" ignore: 404 + + - do: + security.delete_role: + name: "yourapp_read_config" + ignore: 404 --- "Test has_privileges with application-privileges": - do: @@ -188,3 +241,53 @@ teardown: } } } + - do: + headers: { Authorization: "Basic eW91cl9yZWFkOnBAc3N3MHJk" } # your_read + security.has_privileges: + user: null + body: > + { + "application": [ + { + "application" : "yourapp-v1", + "resources" : [ "settings/host", 
"settings/port", "system/key" ], + "privileges" : [ "data:read/settings", "data:write/settings", "read", "write" ] + }, + { + "application" : "yourapp-v2", + "resources" : [ "settings/host" ], + "privileges" : [ "data:read/settings", "data:write/settings" ] + } + ] + } + + - match: { "username" : "your_read" } + - match: { "has_all_requested" : false } + - match: { "application": { + "yourapp-v1": { + "settings/host": { + "data:read/settings": true, + "data:write/settings": false, + "read": true, + "write": false + }, + "settings/port": { + "data:read/settings": true, + "data:write/settings": false, + "read": true, + "write": false + }, + "system/key": { + "data:read/settings": false, + "data:write/settings": false, + "read": false, + "write": false + } + }, + "yourapp-v2": { + "settings/host": { + "data:read/settings": true, + "data:write/settings": false, + } + } + } } diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookIntegrationTests.java index 02ce97d4ea218..521cc2d49fc3f 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookIntegrationTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/actions/webhook/WebhookIntegrationTests.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.watcher.actions.webhook; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.transport.TransportAddress; @@ -43,6 +44,7 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/35503") public class WebhookIntegrationTests extends AbstractWatcherIntegrationTestCase { private MockWebServer webServer = new MockWebServer(); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java index 2f2299d7d65e0..05d8b4ef29ded 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/BasicWatcherTests.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.xpack.watcher.test.integration; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; @@ -62,6 +63,7 @@ @TestLogging("org.elasticsearch.xpack.watcher:DEBUG," + "org.elasticsearch.xpack.watcher.WatcherIndexingListener:TRACE") +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/35503") public class BasicWatcherTests extends AbstractWatcherIntegrationTestCase { public void testIndexWatch() throws Exception { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HttpSecretsIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HttpSecretsIntegrationTests.java index f8ddc3065f79d..3eefa03137146 100644 --- 
a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HttpSecretsIntegrationTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HttpSecretsIntegrationTests.java @@ -87,6 +87,7 @@ protected Settings nodeSettings(int nodeOrdinal) { return super.nodeSettings(nodeOrdinal); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/40587") public void testHttpInput() throws Exception { WatcherClient watcherClient = watcherClient(); watcherClient.preparePutWatch("_id") diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java index a0ef5e97d8534..0e95a15b2a35c 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/WatchAckTests.java @@ -122,6 +122,7 @@ public void testAckSingleAction() throws Exception { assertThat(throttledCount, greaterThan(0L)); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/35506") public void testAckAllActions() throws Exception { PutWatchResponse putWatchResponse = watcherClient().preparePutWatch() .setId("_id") diff --git a/x-pack/qa/full-cluster-restart/build.gradle b/x-pack/qa/full-cluster-restart/build.gradle index 4c2ecd2b7b4ca..40dca76abc913 100644 --- a/x-pack/qa/full-cluster-restart/build.gradle +++ b/x-pack/qa/full-cluster-restart/build.gradle @@ -3,10 +3,10 @@ import org.elasticsearch.gradle.test.RestIntegTestTask import org.elasticsearch.gradle.Version import java.nio.charset.StandardCharsets -import java.util.regex.Matcher // Apply the java plugin to this project so the sources can be edited in an IDE -apply plugin: 'elasticsearch.build' +apply plugin: 'elasticsearch.standalone-test' + unitTest.enabled = false dependencies { @@ -70,8 +70,6 @@ Closure waitWithAuth = { NodeInfo node, AntBuilder ant -> return tmpFile.exists() } -Project mainProject = project - String coreFullClusterRestartPath = project(':qa:full-cluster-restart').projectDir.toPath().resolve('src/test/java').toString() sourceSets { test { @@ -89,224 +87,157 @@ forbiddenPatterns { exclude '**/system_key' } -// tests are pushed down to subprojects -testingConventions.enabled = false - -/** - * Subdirectories of this project are test rolling upgrades with various - * configuration options based on their name. - */ -subprojects { - Matcher m = project.name =~ /with(out)?-system-key/ - if (false == m.matches()) { - throw new InvalidUserDataException("Invalid project name [${project.name}]") - } - boolean withSystemKey = m.group(1) == null - - apply plugin: 'elasticsearch.standalone-test' +String outputDir = "${buildDir}/generated-resources/${project.name}" - // Use resources from the rolling-upgrade project in subdirectories - sourceSets { - test { - java { - srcDirs = ["${mainProject.projectDir}/src/test/java", coreFullClusterRestartPath] - } - resources { - srcDirs = ["${mainProject.projectDir}/src/test/resources"] - } - } - } - - licenseHeaders { - approvedLicenses << 'Apache' - } - - forbiddenPatterns { - exclude '**/system_key' - } - - String outputDir = "${buildDir}/generated-resources/${project.name}" - - // This is a top level task which we will add dependencies to below. - // It is a single task that can be used to backcompat tests against all versions. 
- task bwcTest { +// This is a top level task which we will add dependencies to below. +// It is a single task that can be used to backcompat tests against all versions. +task bwcTest { description = 'Runs backwards compatibility tests.' group = 'verification' - } +} - String output = "${buildDir}/generated-resources/${project.name}" - task copyTestNodeKeyMaterial(type: Copy) { +task copyTestNodeKeyMaterial(type: Copy) { from project(':x-pack:plugin:core').files('src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem', - 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt', - 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks') + 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt', + 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks') into outputDir - } +} - for (Version version : bwcVersions.indexCompatible) { +for (Version version : bwcVersions.indexCompatible) { String baseName = "v${version}" Task oldClusterTest = tasks.create(name: "${baseName}#oldClusterTest", type: RestIntegTestTask) { - mustRunAfter(precommit) + mustRunAfter(precommit) } Object extension = extensions.findByName("${baseName}#oldClusterTestCluster") configure(extensions.findByName("${baseName}#oldClusterTestCluster")) { - dependsOn copyTestNodeKeyMaterial - if (version.before('6.3.0')) { - String depVersion = version; - if (project.bwcVersions.unreleased.contains(version)) { - depVersion += "-SNAPSHOT" - } - mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${depVersion}" - - } - bwcVersion = version - numBwcNodes = 2 - numNodes = 2 - clusterName = 'full-cluster-restart' - String usersCli = version.before('6.3.0') ? 
'bin/x-pack/users' : 'bin/elasticsearch-users' - setupCommand 'setupTestUser', usersCli, 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' - waitCondition = waitWithAuth - - // some tests rely on the translog not being flushed - setting 'indices.memory.shard_inactive_time', '20m' - - setting 'xpack.security.enabled', 'true' - setting 'xpack.security.transport.ssl.enabled', 'true' - if (project.inFipsJvm) { - setting 'xpack.security.transport.ssl.key', 'testnode.pem' - setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' - keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' - } else { - setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' - setting 'xpack.security.transport.ssl.keystore.password', 'testnode' - } - setting 'xpack.license.self_generated.type', 'trial' - dependsOn copyTestNodeKeyMaterial - extraConfigFile 'testnode.pem', new File(outputDir + '/testnode.pem') - extraConfigFile 'testnode.crt', new File(outputDir + '/testnode.crt') - extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') - if (withSystemKey) { - if (version.onOrAfter('5.1.0') && version.before('6.0.0')) { - // The setting didn't exist until 5.1.0 - setting 'xpack.security.system_key.required', 'true' + dependsOn copyTestNodeKeyMaterial + if (version.before('6.3.0')) { + String depVersion = version; + if (project.bwcVersions.unreleased.contains(version)) { + depVersion += "-SNAPSHOT" + } + mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${depVersion}" + } - if (version.onOrAfter('6.0.0')) { - keystoreFile 'xpack.watcher.encryption_key', "${mainProject.projectDir}/src/test/resources/system_key" + bwcVersion = version + numBwcNodes = 2 + numNodes = 2 + clusterName = 'full-cluster-restart' + String usersCli = version.before('6.3.0') ? 
'bin/x-pack/users' : 'bin/elasticsearch-users' + setupCommand 'setupTestUser', usersCli, 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' + waitCondition = waitWithAuth + + // some tests rely on the translog not being flushed + setting 'indices.memory.shard_inactive_time', '20m' + + setting 'xpack.security.enabled', 'true' + setting 'xpack.security.transport.ssl.enabled', 'true' + if (project.inFipsJvm) { + setting 'xpack.security.transport.ssl.key', 'testnode.pem' + setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' + keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' } else { - extraConfigFile 'x-pack/system_key', "${mainProject.projectDir}/src/test/resources/system_key" + setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' + setting 'xpack.security.transport.ssl.keystore.password', 'testnode' } + setting 'xpack.license.self_generated.type', 'trial' + dependsOn copyTestNodeKeyMaterial + extraConfigFile 'testnode.pem', new File(outputDir + '/testnode.pem') + extraConfigFile 'testnode.crt', new File(outputDir + '/testnode.crt') + extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') + + keystoreFile 'xpack.watcher.encryption_key', "${project.projectDir}/src/test/resources/system_key" setting 'xpack.watcher.encrypt_sensitive_data', 'true' - } } Task oldClusterTestRunner = tasks.getByName("${baseName}#oldClusterTestRunner") oldClusterTestRunner.configure { - systemProperty 'tests.is_old_cluster', 'true' - systemProperty 'tests.old_cluster_version', version.toString().minus("-SNAPSHOT") - systemProperty 'tests.path.repo', new File(buildDir, "cluster/shared/repo") - exclude 'org/elasticsearch/upgrades/FullClusterRestartIT.class' - exclude 'org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.class' - exclude 'org/elasticsearch/upgrades/QueryBuilderBWCIT.class' + systemProperty 'tests.is_old_cluster', 'true' + systemProperty 'tests.old_cluster_version', version.toString().minus("-SNAPSHOT") + systemProperty 'tests.path.repo', new File(buildDir, "cluster/shared/repo") + exclude 'org/elasticsearch/upgrades/FullClusterRestartIT.class' + exclude 'org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.class' + exclude 'org/elasticsearch/upgrades/QueryBuilderBWCIT.class' } Task upgradedClusterTest = tasks.create(name: "${baseName}#upgradedClusterTest", type: RestIntegTestTask) configure(extensions.findByName("${baseName}#upgradedClusterTestCluster")) { - dependsOn oldClusterTestRunner, - "${baseName}#oldClusterTestCluster#node0.stop", - "${baseName}#oldClusterTestCluster#node1.stop" - numNodes = 2 - clusterName = 'full-cluster-restart' - dataDir = { nodeNum -> oldClusterTest.nodes[nodeNum].dataDir } - cleanShared = false // We want to keep snapshots made by the old cluster! 
- setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' - waitCondition = waitWithAuth - - // some tests rely on the translog not being flushed - setting 'indices.memory.shard_inactive_time', '20m' - setting 'xpack.security.enabled', 'true' - if (project.inFipsJvm) { - setting 'xpack.security.transport.ssl.key', 'testnode.pem' - setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' - keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' - } else { - setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' - setting 'xpack.security.transport.ssl.keystore.password', 'testnode' - } - setting 'xpack.license.self_generated.type', 'trial' - dependsOn copyTestNodeKeyMaterial - extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') - extraConfigFile 'testnode.pem', new File(outputDir + '/testnode.pem') - extraConfigFile 'testnode.crt', new File(outputDir + '/testnode.crt') - if (withSystemKey) { - setting 'xpack.watcher.encrypt_sensitive_data', 'true' - keystoreFile 'xpack.watcher.encryption_key', "${mainProject.projectDir}/src/test/resources/system_key" - } + dependsOn oldClusterTestRunner, + "${baseName}#oldClusterTestCluster#node0.stop", + "${baseName}#oldClusterTestCluster#node1.stop" + numNodes = 2 + clusterName = 'full-cluster-restart' + dataDir = { nodeNum -> oldClusterTest.nodes[nodeNum].dataDir } + cleanShared = false // We want to keep snapshots made by the old cluster! + setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' + waitCondition = waitWithAuth + + // some tests rely on the translog not being flushed + setting 'indices.memory.shard_inactive_time', '20m' + setting 'xpack.security.enabled', 'true' + if (project.inFipsJvm) { + setting 'xpack.security.transport.ssl.key', 'testnode.pem' + setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' + keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' + } else { + setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' + setting 'xpack.security.transport.ssl.keystore.password', 'testnode' + } + setting 'xpack.license.self_generated.type', 'trial' + dependsOn copyTestNodeKeyMaterial + extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') + extraConfigFile 'testnode.pem', new File(outputDir + '/testnode.pem') + extraConfigFile 'testnode.crt', new File(outputDir + '/testnode.crt') + + setting 'xpack.watcher.encrypt_sensitive_data', 'true' + keystoreFile 'xpack.watcher.encryption_key', "${project.projectDir}/src/test/resources/system_key" } Task upgradedClusterTestRunner = tasks.getByName("${baseName}#upgradedClusterTestRunner") upgradedClusterTestRunner.configure { - systemProperty 'tests.is_old_cluster', 'false' - systemProperty 'tests.old_cluster_version', version.toString().minus("-SNAPSHOT") - systemProperty 'tests.path.repo', new File(buildDir, "cluster/shared/repo") - exclude 'org/elasticsearch/upgrades/FullClusterRestartIT.class' - exclude 'org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.class' - exclude 'org/elasticsearch/upgrades/QueryBuilderBWCIT.class' + systemProperty 'tests.is_old_cluster', 'false' + systemProperty 'tests.old_cluster_version', version.toString().minus("-SNAPSHOT") + systemProperty 'tests.path.repo', new File(buildDir, "cluster/shared/repo") + exclude 'org/elasticsearch/upgrades/FullClusterRestartIT.class' + exclude 
'org/elasticsearch/upgrades/FullClusterRestartSettingsUpgradeIT.class' + exclude 'org/elasticsearch/upgrades/QueryBuilderBWCIT.class' } Task versionBwcTest = tasks.create(name: "${baseName}#bwcTest") { - dependsOn = [upgradedClusterTest] + dependsOn = [upgradedClusterTest] } if (project.bwc_tests_enabled) { - bwcTest.dependsOn(versionBwcTest) + bwcTest.dependsOn(versionBwcTest) } - } - - unitTest.enabled = false // no unit tests for full cluster restarts, only the rest integration test +} - // basic integ tests includes testing bwc against the most recent version - task bwcTestSnapshots { +// basic integ tests includes testing bwc against the most recent version +task bwcTestSnapshots { if (project.bwc_tests_enabled) { - for (final def version : bwcVersions.unreleasedIndexCompatible) { - dependsOn "v${version}#bwcTest" - } + for (final def version : bwcVersions.unreleasedIndexCompatible) { + dependsOn "v${version}#bwcTest" + } } - } - - check.dependsOn(bwcTestSnapshots) +} - dependencies { - // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here - testCompile project(path: xpackModule('core'), configuration: 'default') - testCompile project(path: xpackModule('watcher'), configuration: 'runtime') - testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') - testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') - } +check.dependsOn(bwcTestSnapshots) - // copy x-pack plugin info so it is on the classpath and security manager has the right permissions - task copyXPackRestSpec(type: Copy) { +// copy x-pack plugin info so it is on the classpath and security manager has the right permissions +task copyXPackRestSpec(type: Copy) { dependsOn(project.configurations.restSpec, 'processTestResources') from project(xpackModule('core')).sourceSets.test.resources include 'rest-api-spec/api/**' into project.sourceSets.test.output.resourcesDir - } +} - task copyXPackPluginProps(type: Copy) { +task copyXPackPluginProps(type: Copy) { dependsOn(copyXPackRestSpec) from project(xpackModule('core')).file('src/main/plugin-metadata') from project(xpackModule('core')).tasks.pluginProperties into outputDir - } - project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackPluginProps) - - repositories { - maven { - url "https://artifacts.elastic.co/maven" - } - maven { - url "https://snapshots.elastic.co/maven" - } - } } +project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackPluginProps) diff --git a/x-pack/qa/full-cluster-restart/with-system-key/build.gradle b/x-pack/qa/full-cluster-restart/with-system-key/build.gradle deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/x-pack/qa/full-cluster-restart/without-system-key/build.gradle b/x-pack/qa/full-cluster-restart/without-system-key/build.gradle deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/x-pack/qa/kerberos-tests/build.gradle b/x-pack/qa/kerberos-tests/build.gradle index a59becbfe6b54..50b709f77dca5 100644 --- a/x-pack/qa/kerberos-tests/build.gradle +++ b/x-pack/qa/kerberos-tests/build.gradle @@ -2,9 +2,11 @@ import java.nio.file.Path import java.nio.file.Paths import java.nio.file.Files -apply plugin: 'elasticsearch.vagrantsupport' apply plugin: 'elasticsearch.standalone-rest-test' apply plugin: 'elasticsearch.rest-test' +apply plugin: 'elasticsearch.test.fixtures' + +testFixtures.useFixture ":test:fixtures:krb5kdc-fixture" dependencies { testCompile 
"org.elasticsearch.plugin:x-pack-core:${version}" @@ -12,75 +14,6 @@ dependencies { testCompile project(path: xpackModule('security'), configuration: 'testArtifacts') } -// MIT Kerberos Vagrant Testing Fixture -String box = "krb5kdc" -Map vagrantEnvVars = [ - 'VAGRANT_CWD' : "${project(':test:fixtures:krb5kdc-fixture').projectDir}", - 'VAGRANT_VAGRANTFILE' : 'Vagrantfile', - 'VAGRANT_PROJECT_DIR' : "${project(':test:fixtures:krb5kdc-fixture').projectDir}" -] - -task krb5kdcUpdate(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'box' - subcommand 'update' - boxName box - environmentVars vagrantEnvVars - dependsOn "vagrantCheckVersion", "virtualboxCheckVersion" -} - -task krb5kdcFixture(type: org.elasticsearch.gradle.test.VagrantFixture) { - command 'up' - args '--provision', '--provider', 'virtualbox' - boxName box - environmentVars vagrantEnvVars - dependsOn krb5kdcUpdate -} - -// lazily resolve to avoid any slowdowns from DNS lookups prior to when we need this value -Object httpPrincipal = new Object() { - @Override - String toString() { - InetAddress resolvedAddress = InetAddress.getByName('127.0.0.1') - return "HTTP/" + resolvedAddress.getCanonicalHostName() - } -} - -String realm = "BUILD.ELASTIC.CO" - -task 'addPrincipal#peppa'(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'ssh' - args '--command', "sudo bash /vagrant/src/main/resources/provision/addprinc.sh peppa " - boxName box - environmentVars vagrantEnvVars - dependsOn krb5kdcFixture -} - -task 'addPrincipal#george'(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'ssh' - args '--command', "sudo bash /vagrant/src/main/resources/provision/addprinc.sh george dino" - boxName box - environmentVars vagrantEnvVars - dependsOn krb5kdcFixture -} - -task 'addPrincipal#HTTP'(type: org.elasticsearch.gradle.vagrant.VagrantCommandTask) { - command 'ssh' - args '--command', "sudo bash /vagrant/src/main/resources/provision/addprinc.sh $httpPrincipal" - boxName box - environmentVars vagrantEnvVars - dependsOn krb5kdcFixture -} - -task krb5AddPrincipals { dependsOn krb5kdcFixture, 'addPrincipal#peppa', 'addPrincipal#george', 'addPrincipal#HTTP' } - -def generatedResources = "$buildDir/generated-resources/keytabs" -task copyKeytabToGeneratedResources(type: Copy) { - Path peppaKeytab = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("keytabs").resolve("peppa.keytab").toAbsolutePath() - from peppaKeytab; - into generatedResources - dependsOn krb5AddPrincipals -} - integTestCluster { // force localhost IPv4 otherwise it is a chicken and egg problem where we need the keytab for the hostname when starting the cluster // but do not know the exact address that is first in the http ports file @@ -96,12 +29,10 @@ integTestCluster { setting 'xpack.security.authc.realms.kerberos.kerberos.krb.debug', 'true' setting 'xpack.security.authc.realms.kerberos.kerberos.remove_realm_name', 'false' - Path krb5conf = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("conf").resolve("krb5.conf").toAbsolutePath() - String jvmArgsStr = " -Djava.security.krb5.conf=${krb5conf}" + " -Dsun.security.krb5.debug=true" - jvmArgs jvmArgsStr - Path esKeytab = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("keytabs") - .resolve("$httpPrincipal".replace('/', '_') + ".keytab").toAbsolutePath() - extraConfigFile("es.keytab", "${esKeytab}") + jvmArgs += " -Djava.security.krb5.conf=${project(':test:fixtures:krb5kdc-fixture').ext.krb5Conf("peppa")}" + jvmArgs += 
" -Dsun.security.krb5.debug=true" + + extraConfigFile("es.keytab", project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("peppa", "HTTP_localhost.keytab")) setupCommand 'setupTestAdmin', 'bin/elasticsearch-users', 'useradd', "test_admin", '-p', 'x-pack-test-password', '-r', "superuser" @@ -119,6 +50,7 @@ integTestCluster { } +String realm = "BUILD.ELASTIC.CO" integTestRunner { Path peppaKeytab = Paths.get("${project.buildDir}", "generated-resources", "keytabs", "peppa.keytab") systemProperty 'test.userkt', "peppa@${realm}" @@ -126,16 +58,17 @@ integTestRunner { systemProperty 'test.userpwd', "george@${realm}" systemProperty 'test.userpwd.password', "dino" systemProperty 'tests.security.manager', 'true' - Path krb5conf = project(':test:fixtures:krb5kdc-fixture').buildDir.toPath().resolve("conf").resolve("krb5.conf").toAbsolutePath() - List jvmargs = ["-Djava.security.krb5.conf=${krb5conf}","-Dsun.security.krb5.debug=true"] - jvmArgs jvmargs + jvmArgs([ + "-Djava.security.krb5.conf=${project(':test:fixtures:krb5kdc-fixture').ext.krb5Conf("peppa")}", + "-Dsun.security.krb5.debug=true" + ]) } -if (project.rootProject.vagrantSupported == false) { - integTest.enabled = false - testingConventions.enabled = false -} else { - project.sourceSets.test.output.dir(generatedResources) - integTestCluster.dependsOn krb5AddPrincipals, krb5kdcFixture, copyKeytabToGeneratedResources - integTest.finalizedBy project(':test:fixtures:krb5kdc-fixture').halt +def generatedResources = "$buildDir/generated-resources/keytabs" +task copyKeytabToGeneratedResources(type: Copy) { + from project(':test:fixtures:krb5kdc-fixture').ext.krb5Keytabs("peppa", "peppa.keytab") + into generatedResources + dependsOn project(':test:fixtures:krb5kdc-fixture').postProcessFixture } +project.sourceSets.test.output.dir(generatedResources, builtBy:copyKeytabToGeneratedResources) + diff --git a/x-pack/qa/rolling-upgrade/build.gradle b/x-pack/qa/rolling-upgrade/build.gradle index f8222669b218e..f689573a61437 100644 --- a/x-pack/qa/rolling-upgrade/build.gradle +++ b/x-pack/qa/rolling-upgrade/build.gradle @@ -3,10 +3,10 @@ import org.elasticsearch.gradle.test.RestIntegTestTask import org.elasticsearch.gradle.Version import java.nio.charset.StandardCharsets -import java.util.regex.Matcher // Apply the java plugin to this project so the sources can be edited in an IDE -apply plugin: 'elasticsearch.build' +apply plugin: 'elasticsearch.standalone-test' + unitTest.enabled = false dependencies { @@ -68,161 +68,50 @@ Closure waitWithAuth = { NodeInfo node, AntBuilder ant -> return tmpFile.exists() } -Project mainProject = project - compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked" forbiddenPatterns { exclude '**/system_key' } -// Tests are pushed down to subprojects -testingConventions.enabled = false - -/** - * Subdirectories of this project are test rolling upgrades with various - * configuration options based on their name. 
- */ -subprojects { - Matcher m = project.name =~ /with(out)?-system-key/ - if (false == m.matches()) { - throw new InvalidUserDataException("Invalid project name [${project.name}]") - } - boolean withSystemKey = m.group(1) == null - - apply plugin: 'elasticsearch.standalone-test' - - // Use resources from the rolling-upgrade project in subdirectories - sourceSets { - test { - java { - srcDirs = ["${mainProject.projectDir}/src/test/java"] - } - resources { - srcDirs = ["${mainProject.projectDir}/src/test/resources"] - } - } - } - - forbiddenPatterns { - exclude '**/system_key' - } +String outputDir = "${buildDir}/generated-resources/${project.name}" - String outputDir = "${buildDir}/generated-resources/${project.name}" - - // This is a top level task which we will add dependencies to below. - // It is a single task that can be used to backcompat tests against all versions. - task bwcTest { +// This is a top level task which we will add dependencies to below. +// It is a single task that can be used to backcompat tests against all versions. +task bwcTest { description = 'Runs backwards compatibility tests.' group = 'verification' - } +} - String output = "${buildDir}/generated-resources/${project.name}" - task copyTestNodeKeyMaterial(type: Copy) { +task copyTestNodeKeyMaterial(type: Copy) { from project(':x-pack:plugin:core').files('src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem', - 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt', - 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks') + 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt', + 'src/test/resources/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks') into outputDir - } +} - for (Version version : bwcVersions.wireCompatible) { +for (Version version : bwcVersions.wireCompatible) { String baseName = "v${version}" Task oldClusterTest = tasks.create(name: "${baseName}#oldClusterTest", type: RestIntegTestTask) { - mustRunAfter(precommit) + mustRunAfter(precommit) } configure(extensions.findByName("${baseName}#oldClusterTestCluster")) { - dependsOn copyTestNodeKeyMaterial - if (version.before('6.3.0')) { - String depVersion = version; - if (project.bwcVersions.unreleased.contains(version)) { - depVersion += "-SNAPSHOT" - } - mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${depVersion}" - } - String usersCli = version.before('6.3.0') ? 
'bin/x-pack/users' : 'bin/elasticsearch-users' - setupCommand 'setupTestUser', usersCli, 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' - bwcVersion = version - numBwcNodes = 3 - numNodes = 3 - clusterName = 'rolling-upgrade' - waitCondition = waitWithAuth - setting 'xpack.monitoring.exporters._http.type', 'http' - setting 'xpack.monitoring.exporters._http.enabled', 'false' - setting 'xpack.monitoring.exporters._http.auth.username', 'test_user' - setting 'xpack.monitoring.exporters._http.auth.password', 'x-pack-test-password' - setting 'xpack.license.self_generated.type', 'trial' - setting 'xpack.security.enabled', 'true' - setting 'xpack.security.transport.ssl.enabled', 'true' - setting 'xpack.security.authc.token.enabled', 'true' - setting 'xpack.security.audit.enabled', 'true' - if (project.inFipsJvm) { - setting 'xpack.security.transport.ssl.key', 'testnode.pem' - setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' - keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' - } else { - setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' - setting 'xpack.security.transport.ssl.keystore.password', 'testnode' - } - dependsOn copyTestNodeKeyMaterial - extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') - extraConfigFile 'testnode.pem', new File(outputDir + '/testnode.pem') - extraConfigFile 'testnode.crt', new File(outputDir + '/testnode.crt') - if (version.onOrAfter('7.0.0')) { - setting 'xpack.security.authc.realms.file.file1.order', '0' - setting 'xpack.security.authc.realms.native.native1.order', '1' - } else { - setting 'xpack.security.authc.realms.file1.type', 'file' - setting 'xpack.security.authc.realms.file1.order', '0' - setting 'xpack.security.authc.realms.native1.type', 'native' - setting 'xpack.security.authc.realms.native1.order', '1' - } - - if (withSystemKey) { - if (version.onOrAfter('5.1.0') && version.before('6.0.0')) { - // The setting didn't exist until 5.1.0 - setting 'xpack.security.system_key.required', 'true' - } - if (version.onOrAfter('6.0.0')) { - keystoreFile 'xpack.watcher.encryption_key', "${mainProject.projectDir}/src/test/resources/system_key" - } else { - String systemKeyFile = version.before('6.3.0') ? 'x-pack/system_key' : 'system_key' - extraConfigFile systemKeyFile, "${mainProject.projectDir}/src/test/resources/system_key" - keystoreSetting 'xpack.security.authc.token.passphrase', 'token passphrase' + dependsOn copyTestNodeKeyMaterial + if (version.before('6.3.0')) { + String depVersion = version; + if (project.bwcVersions.unreleased.contains(version)) { + depVersion += "-SNAPSHOT" + } + mavenPlugin 'x-pack', "org.elasticsearch.plugin:x-pack:${depVersion}" } - setting 'xpack.watcher.encrypt_sensitive_data', 'true' - } - - if (version.onOrAfter('6.6.0')) { - setting 'ccr.auto_follow.wait_for_metadata_timeout', '1s' - } - - // Old versions of the code contain an invalid assertion that trips - // during tests. Versions 5.6.9 and 6.2.4 have been fixed by removing - // the assertion, but this is impossible for released versions. - // However, released versions run without assertions, so end users won't - // be suffering the effects. This argument effectively removes the - // incorrect assertion from the older versions used in the BWC tests. 
- if (version.before('5.6.9') || (version.onOrAfter('6.0.0') && version.before('6.2.4'))) { - jvmArgs '-da:org.elasticsearch.xpack.monitoring.exporter.http.HttpExportBulk' - } - } - - Task oldClusterTestRunner = tasks.getByName("${baseName}#oldClusterTestRunner") - oldClusterTestRunner.configure { - systemProperty 'tests.rest.suite', 'old_cluster' - } - - Closure configureUpgradeCluster = {String name, Task lastRunner, int stopNode, Closure getOtherUnicastHostAddresses -> - configure(extensions.findByName("${baseName}#${name}")) { - dependsOn lastRunner, "${baseName}#oldClusterTestCluster#node${stopNode}.stop" - setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' + String usersCli = version.before('6.3.0') ? 'bin/x-pack/users' : 'bin/elasticsearch-users' + setupCommand 'setupTestUser', usersCli, 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' + bwcVersion = version + numBwcNodes = 3 + numNodes = 3 clusterName = 'rolling-upgrade' - otherUnicastHostAddresses = { getOtherUnicastHostAddresses() } - /* Override the data directory so the new node always gets the node we - * just stopped's data directory. */ - dataDir = { nodeNumber -> oldClusterTest.nodes[stopNode].dataDir } waitCondition = waitWithAuth setting 'xpack.monitoring.exporters._http.type', 'http' setting 'xpack.monitoring.exporters._http.enabled', 'false' @@ -231,154 +120,195 @@ subprojects { setting 'xpack.license.self_generated.type', 'trial' setting 'xpack.security.enabled', 'true' setting 'xpack.security.transport.ssl.enabled', 'true' + setting 'xpack.security.authc.token.enabled', 'true' + setting 'xpack.security.audit.enabled', 'true' if (project.inFipsJvm) { - setting 'xpack.security.transport.ssl.key', 'testnode.pem' - setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' - keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' + setting 'xpack.security.transport.ssl.key', 'testnode.pem' + setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' + keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' } else { - setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' - setting 'xpack.security.transport.ssl.keystore.password', 'testnode' + setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' + setting 'xpack.security.transport.ssl.keystore.password', 'testnode' } - setting 'node.attr.upgraded', 'true' - setting 'xpack.security.authc.token.enabled', 'true' - setting 'xpack.security.audit.enabled', 'true' - setting 'node.name', "upgraded-node-${stopNode}" dependsOn copyTestNodeKeyMaterial extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') extraConfigFile 'testnode.pem', new File(outputDir + '/testnode.pem') extraConfigFile 'testnode.crt', new File(outputDir + '/testnode.crt') if (version.onOrAfter('7.0.0')) { - setting 'xpack.security.authc.realms.file.file1.order', '0' - setting 'xpack.security.authc.realms.native.native1.order', '1' + setting 'xpack.security.authc.realms.file.file1.order', '0' + setting 'xpack.security.authc.realms.native.native1.order', '1' } else { - setting 'xpack.security.authc.realms.file1.type', 'file' - setting 'xpack.security.authc.realms.file1.order', '0' - setting 'xpack.security.authc.realms.native1.type', 'native' - setting 'xpack.security.authc.realms.native1.order', '1' + setting 'xpack.security.authc.realms.file1.type', 'file' + setting 
'xpack.security.authc.realms.file1.order', '0' + setting 'xpack.security.authc.realms.native1.type', 'native' + setting 'xpack.security.authc.realms.native1.order', '1' + } + + keystoreFile 'xpack.watcher.encryption_key', "${project.projectDir}/src/test/resources/system_key" + setting 'xpack.watcher.encrypt_sensitive_data', 'true' + + if (version.onOrAfter('6.6.0')) { + setting 'ccr.auto_follow.wait_for_metadata_timeout', '1s' } - if (withSystemKey) { - setting 'xpack.watcher.encrypt_sensitive_data', 'true' - keystoreFile 'xpack.watcher.encryption_key', "${mainProject.projectDir}/src/test/resources/system_key" + + // Old versions of the code contain an invalid assertion that trips + // during tests. Versions 5.6.9 and 6.2.4 have been fixed by removing + // the assertion, but this is impossible for released versions. + // However, released versions run without assertions, so end users won't + // be suffering the effects. This argument effectively removes the + // incorrect assertion from the older versions used in the BWC tests. + if (version.before('5.6.9') || (version.onOrAfter('6.0.0') && version.before('6.2.4'))) { + jvmArgs '-da:org.elasticsearch.xpack.monitoring.exporter.http.HttpExportBulk' } - if (version.before('6.0.0')) { - keystoreSetting 'xpack.security.authc.token.passphrase', 'token passphrase' + } + + Task oldClusterTestRunner = tasks.getByName("${baseName}#oldClusterTestRunner") + oldClusterTestRunner.configure { + systemProperty 'tests.rest.suite', 'old_cluster' + } + + Closure configureUpgradeCluster = {String name, Task lastRunner, int stopNode, Closure getOtherUnicastHostAddresses -> + configure(extensions.findByName("${baseName}#${name}")) { + dependsOn lastRunner, "${baseName}#oldClusterTestCluster#node${stopNode}.stop" + setupCommand 'setupTestUser', 'bin/elasticsearch-users', 'useradd', 'test_user', '-p', 'x-pack-test-password', '-r', 'superuser' + clusterName = 'rolling-upgrade' + otherUnicastHostAddresses = { getOtherUnicastHostAddresses() } + /* Override the data directory so the new node always gets the node we + * just stopped's data directory. 
*/ + dataDir = { nodeNumber -> oldClusterTest.nodes[stopNode].dataDir } + waitCondition = waitWithAuth + setting 'xpack.monitoring.exporters._http.type', 'http' + setting 'xpack.monitoring.exporters._http.enabled', 'false' + setting 'xpack.monitoring.exporters._http.auth.username', 'test_user' + setting 'xpack.monitoring.exporters._http.auth.password', 'x-pack-test-password' + setting 'xpack.license.self_generated.type', 'trial' + setting 'xpack.security.enabled', 'true' + setting 'xpack.security.transport.ssl.enabled', 'true' + if (project.inFipsJvm) { + setting 'xpack.security.transport.ssl.key', 'testnode.pem' + setting 'xpack.security.transport.ssl.certificate', 'testnode.crt' + keystoreSetting 'xpack.security.transport.ssl.secure_key_passphrase', 'testnode' + } else { + setting 'xpack.security.transport.ssl.keystore.path', 'testnode.jks' + setting 'xpack.security.transport.ssl.keystore.password', 'testnode' + } + setting 'node.attr.upgraded', 'true' + setting 'xpack.security.authc.token.enabled', 'true' + setting 'xpack.security.audit.enabled', 'true' + setting 'node.name', "upgraded-node-${stopNode}" + dependsOn copyTestNodeKeyMaterial + extraConfigFile 'testnode.jks', new File(outputDir + '/testnode.jks') + extraConfigFile 'testnode.pem', new File(outputDir + '/testnode.pem') + extraConfigFile 'testnode.crt', new File(outputDir + '/testnode.crt') + if (version.onOrAfter('7.0.0')) { + setting 'xpack.security.authc.realms.file.file1.order', '0' + setting 'xpack.security.authc.realms.native.native1.order', '1' + } else { + setting 'xpack.security.authc.realms.file1.type', 'file' + setting 'xpack.security.authc.realms.file1.order', '0' + setting 'xpack.security.authc.realms.native1.type', 'native' + setting 'xpack.security.authc.realms.native1.order', '1' + } + setting 'xpack.watcher.encrypt_sensitive_data', 'true' + keystoreFile 'xpack.watcher.encryption_key', "${project.projectDir}/src/test/resources/system_key" + if (version.before('6.0.0')) { + keystoreSetting 'xpack.security.authc.token.passphrase', 'token passphrase' + } } - } } Task oneThirdUpgradedTest = tasks.create(name: "${baseName}#oneThirdUpgradedTest", type: RestIntegTestTask) configureUpgradeCluster("oneThirdUpgradedTestCluster", oldClusterTestRunner, 0, - // Use all running nodes as seed nodes so there is no race between pinging and the tests - { [oldClusterTest.nodes.get(1).transportUri(), oldClusterTest.nodes.get(2).transportUri()] }) + // Use all running nodes as seed nodes so there is no race between pinging and the tests + { [oldClusterTest.nodes.get(1).transportUri(), oldClusterTest.nodes.get(2).transportUri()] }) Task oneThirdUpgradedTestRunner = tasks.getByName("${baseName}#oneThirdUpgradedTestRunner") oneThirdUpgradedTestRunner.configure { - systemProperty 'tests.rest.suite', 'mixed_cluster' - systemProperty 'tests.first_round', 'true' - // We only need to run these tests once so we may as well do it when we're two thirds upgraded - systemProperty 'tests.rest.blacklist', [ - 'mixed_cluster/10_basic/Start scroll in mixed cluster on upgraded node that we will continue after upgrade', - 'mixed_cluster/30_ml_jobs_crud/Create a job in the mixed cluster and write some data', - 'mixed_cluster/40_ml_datafeed_crud/Put job and datafeed in mixed cluster', + systemProperty 'tests.rest.suite', 'mixed_cluster' + systemProperty 'tests.first_round', 'true' + // We only need to run these tests once so we may as well do it when we're two thirds upgraded + systemProperty 'tests.rest.blacklist', [ + 'mixed_cluster/10_basic/Start 
   Task oneThirdUpgradedTest = tasks.create(name: "${baseName}#oneThirdUpgradedTest", type: RestIntegTestTask)
   configureUpgradeCluster("oneThirdUpgradedTestCluster", oldClusterTestRunner, 0,
-      // Use all running nodes as seed nodes so there is no race between pinging and the tests
-      { [oldClusterTest.nodes.get(1).transportUri(), oldClusterTest.nodes.get(2).transportUri()] })
+    // Use all running nodes as seed nodes so there is no race between pinging and the tests
+    { [oldClusterTest.nodes.get(1).transportUri(), oldClusterTest.nodes.get(2).transportUri()] })
   Task oneThirdUpgradedTestRunner = tasks.getByName("${baseName}#oneThirdUpgradedTestRunner")
   oneThirdUpgradedTestRunner.configure {
-      systemProperty 'tests.rest.suite', 'mixed_cluster'
-      systemProperty 'tests.first_round', 'true'
-      // We only need to run these tests once so we may as well do it when we're two thirds upgraded
-      systemProperty 'tests.rest.blacklist', [
-          'mixed_cluster/10_basic/Start scroll in mixed cluster on upgraded node that we will continue after upgrade',
-          'mixed_cluster/30_ml_jobs_crud/Create a job in the mixed cluster and write some data',
-          'mixed_cluster/40_ml_datafeed_crud/Put job and datafeed in mixed cluster',
+    systemProperty 'tests.rest.suite', 'mixed_cluster'
+    systemProperty 'tests.first_round', 'true'
+    // We only need to run these tests once so we may as well do it when we're two thirds upgraded
+    systemProperty 'tests.rest.blacklist', [
+        'mixed_cluster/10_basic/Start scroll in mixed cluster on upgraded node that we will continue after upgrade',
+        'mixed_cluster/30_ml_jobs_crud/Create a job in the mixed cluster and write some data',
+        'mixed_cluster/40_ml_datafeed_crud/Put job and datafeed in mixed cluster',
   ].join(',')
-      finalizedBy "${baseName}#oldClusterTestCluster#node1.stop"
+    finalizedBy "${baseName}#oldClusterTestCluster#node1.stop"
   }
   Task twoThirdsUpgradedTest = tasks.create(name: "${baseName}#twoThirdsUpgradedTest", type: RestIntegTestTask)
   configureUpgradeCluster("twoThirdsUpgradedTestCluster", oneThirdUpgradedTestRunner, 1,
-      // Use all running nodes as seed nodes so there is no race between pinging and the tests
-      { [oldClusterTest.nodes.get(2).transportUri(), oneThirdUpgradedTest.nodes.get(0).transportUri()] })
+    // Use all running nodes as seed nodes so there is no race between pinging and the tests
+    { [oldClusterTest.nodes.get(2).transportUri(), oneThirdUpgradedTest.nodes.get(0).transportUri()] })
   Task twoThirdsUpgradedTestRunner = tasks.getByName("${baseName}#twoThirdsUpgradedTestRunner")
   twoThirdsUpgradedTestRunner.configure {
-      systemProperty 'tests.rest.suite', 'mixed_cluster'
-      systemProperty 'tests.first_round', 'false'
-      finalizedBy "${baseName}#oldClusterTestCluster#node2.stop"
+    systemProperty 'tests.rest.suite', 'mixed_cluster'
+    systemProperty 'tests.first_round', 'false'
+    finalizedBy "${baseName}#oldClusterTestCluster#node2.stop"
   }
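Each upgrade stage above follows the same shape: the replacement cluster depends on the previous stage's runner and on the stop task of the old node it replaces (whose data directory it inherits via `dataDir`), and the runner's `finalizedBy` then stops the next old node. A compressed sketch of just that ordering, with names shortened for readability (not the literal task wiring):

    // Ordering sketch only; each entry mirrors a configureUpgradeCluster(...) call above.
    def stages = [
      [cluster: 'oneThirdUpgradedTestCluster', after: 'oldClusterTestRunner', replaces: 'node0'],
      [cluster: 'twoThirdsUpgradedTestCluster', after: 'oneThirdUpgradedTestRunner', replaces: 'node1'],
      [cluster: 'upgradedClusterTestCluster', after: 'twoThirdsUpgradedTestRunner', replaces: 'node2']
    ]
    stages.each { s ->
      println "${s.cluster} starts after ${s.after} and oldClusterTestCluster#${s.replaces}.stop"
    }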
   Task upgradedClusterTest = tasks.create(name: "${baseName}#upgradedClusterTest", type: RestIntegTestTask)
   configureUpgradeCluster("upgradedClusterTestCluster", twoThirdsUpgradedTestRunner, 2,
-      // Use all running nodes as seed nodes so there is no race between pinging and the tests
-      { [oneThirdUpgradedTest.nodes.get(0).transportUri(), twoThirdsUpgradedTest.nodes.get(0).transportUri()] })
+    // Use all running nodes as seed nodes so there is no race between pinging and the tests
+    { [oneThirdUpgradedTest.nodes.get(0).transportUri(), twoThirdsUpgradedTest.nodes.get(0).transportUri()] })
   Task upgradedClusterTestRunner = tasks.getByName("${baseName}#upgradedClusterTestRunner")
   upgradedClusterTestRunner.configure {
-      systemProperty 'tests.rest.suite', 'upgraded_cluster'
-      /*
-       * Force stopping all the upgraded nodes after the test runner
-       * so they are alive during the test.
-       */
-      finalizedBy "${baseName}#oneThirdUpgradedTestCluster#stop"
-      finalizedBy "${baseName}#twoThirdsUpgradedTestCluster#stop"
-
-      // migration tests should only run when the original/old cluster nodes where versions < 5.2.0.
-      // this stinks but we do the check here since our rest tests do not support conditionals
-      // otherwise we could check the index created version
-      String versionStr = project.extensions.findByName("${baseName}#oldClusterTestCluster").properties.get('bwcVersion')
-      String[] versionParts = versionStr.split('\\.')
-      if (versionParts[0].equals("5")) {
-        Integer minor = Integer.parseInt(versionParts[1])
-        if (minor >= 2) {
-          systemProperty 'tests.rest.blacklist', '/20_security/Verify default password migration results in upgraded cluster'
-        }
-      }
+    systemProperty 'tests.rest.suite', 'upgraded_cluster'
+    /*
+     * Force stopping all the upgraded nodes after the test runner
+     * so they are alive during the test.
+     */
+    finalizedBy "${baseName}#oneThirdUpgradedTestCluster#stop"
+    finalizedBy "${baseName}#twoThirdsUpgradedTestCluster#stop"
+
+    // migration tests should only run when the original/old cluster nodes where versions < 5.2.0.
+    // this stinks but we do the check here since our rest tests do not support conditionals
+    // otherwise we could check the index created version
+    String versionStr = project.extensions.findByName("${baseName}#oldClusterTestCluster").properties.get('bwcVersion')
+    String[] versionParts = versionStr.split('\\.')
+    if (versionParts[0].equals("5")) {
+      Integer minor = Integer.parseInt(versionParts[1])
+      if (minor >= 2) {
+        systemProperty 'tests.rest.blacklist', '/20_security/Verify default password migration results in upgraded cluster'
+      }
+    }
   }
   Task versionBwcTest = tasks.create(name: "${baseName}#bwcTest") {
-      dependsOn = [upgradedClusterTest]
+    dependsOn = [upgradedClusterTest]
   }
   if (project.bwc_tests_enabled) {
-      bwcTest.dependsOn(versionBwcTest)
+    bwcTest.dependsOn(versionBwcTest)
   }
-  }
-
-  unitTest.enabled = false // no unit tests for rolling upgrades, only the rest integration test
+}
-  // basic integ tests includes testing bwc against the most recent version
-  task bwcTestSnapshots {
+// basic integ tests includes testing bwc against the most recent version
+task bwcTestSnapshots {
   if (project.bwc_tests_enabled) {
-      for (final def version : bwcVersions.unreleasedWireCompatible) {
-        dependsOn "v${version}#bwcTest"
-      }
+    for (final def version : bwcVersions.unreleasedWireCompatible) {
+      dependsOn "v${version}#bwcTest"
+    }
   }
-  }
-  check.dependsOn(bwcTestSnapshots)
-
-  dependencies {
-    // "org.elasticsearch.plugin:x-pack-core:${version}" doesn't work with idea because the testArtifacts are also here
-    testCompile project(path: xpackModule('core'), configuration: 'default')
-    testCompile project(path: xpackModule('core'), configuration: 'testArtifacts')
-    testCompile project(path: xpackModule('watcher'))
-  }
-
-  compileTestJava.options.compilerArgs << "-Xlint:-cast,-deprecation,-rawtypes,-try,-unchecked"
+}
+check.dependsOn(bwcTestSnapshots)
-  // copy x-pack plugin info so it is on the classpath and security manager has the right permissions
-  task copyXPackRestSpec(type: Copy) {
+// copy x-pack plugin info so it is on the classpath and security manager has the right permissions
+task copyXPackRestSpec(type: Copy) {
   dependsOn(project.configurations.restSpec, 'processTestResources')
   from project(xpackProject('plugin').path).sourceSets.test.resources
   include 'rest-api-spec/api/**'
   into project.sourceSets.test.output.resourcesDir
-  }
+}
-  task copyXPackPluginProps(type: Copy) {
+task copyXPackPluginProps(type: Copy) {
   dependsOn(copyXPackRestSpec)
   from project(xpackModule('core')).file('src/main/plugin-metadata')
   from project(xpackModule('core')).tasks.pluginProperties
   into outputDir
-  }
-  project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackPluginProps)
-
-  repositories {
-    maven {
-      url "https://artifacts.elastic.co/maven"
-    }
-    maven {
-      url "https://snapshots.elastic.co/maven"
-    }
-  }
 }
+project.sourceSets.test.output.dir(outputDir, builtBy: copyXPackPluginProps)
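The version check in the `upgradedClusterTestRunner` block above operates on the raw `bwcVersion` string. A standalone worked example of what that logic decides (the sample value is arbitrary):

    // Worked example of the blacklist decision above; '5.6.16' is just a sample value.
    String versionStr = '5.6.16'
    String[] versionParts = versionStr.split('\\.')   // ['5', '6', '16']
    if (versionParts[0].equals("5") && Integer.parseInt(versionParts[1]) >= 2) {
      // per the comment above, the migration test only applies to old clusters before 5.2.0,
      // so for 5.2+ the corresponding REST test is blacklisted
      println '/20_security/Verify default password migration results in upgraded cluster'
    }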
-group = "${group}.x-pack.qa.rolling-upgrade.without-system-key" diff --git a/x-pack/qa/saml-idp-tests/build.gradle b/x-pack/qa/saml-idp-tests/build.gradle index 44a28278636a9..7b76321fe9d4f 100644 --- a/x-pack/qa/saml-idp-tests/build.gradle +++ b/x-pack/qa/saml-idp-tests/build.gradle @@ -38,6 +38,9 @@ task setupPorts { idpMetaFile.write(content.toString(), "UTF-8") } } +// Don't attempt to get ephemeral ports when Docker is not available +setupPorts.onlyIf { idpFixtureProject.postProcessFixture.enabled } + integTestCluster.dependsOn setupPorts integTestCluster {