diff --git a/TESTING.asciidoc b/TESTING.asciidoc index 2e4a6ede754ad..15cdfdd1c52ba 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -303,15 +303,16 @@ comma separated list of nodes to connect to (e.g. localhost:9300). A transport c be created based on that and used for all the before|after test operations, and to extract the http addresses of the nodes so that REST requests can be sent to them. -== Testing scripts +== Testing packaging -The simplest way to test scripts and the packaged distributions is to use -Vagrant. You can get started by following there five easy steps: +The packaging tests use Vagrant virtual machines to verify that installing +and running elasticsearch distributions works correctly on supported operating systems. +These tests should really only be run in vagrant vms because they're destructive. . Install Virtual Box and Vagrant. -. (Optional) Install vagrant-cachier to squeeze a bit more performance out of -the process: +. (Optional) Install https://github.com/fgrehm/vagrant-cachier[vagrant-cachier] to squeeze +a bit more performance out of the process: -------------------------------------- vagrant plugin install vagrant-cachier @@ -325,26 +326,39 @@ vagrant plugin install vagrant-cachier . Download and smoke test the VMs with `./gradlew vagrantSmokeTest` or `./gradlew -Pvagrant.boxes=all vagrantSmokeTest`. The first time you run this it will -download the base images and provision the boxes and immediately quit. If you -you this again it'll skip the download step. +download the base images and provision the boxes and immediately quit. Downloading all +the images may take a long time. After the images are already on your machine, they won't +be downloaded again unless they have been updated to a new version. . Run the tests with `./gradlew packagingTest`. This will cause Gradle to build the tar, zip, and deb packages and all the plugins. It will then run the tests on ubuntu-1404 and centos-7. We chose those two distributions as the default because they cover deb and rpm packaging and SyvVinit and systemd. -You can run on all the VMs by running `./gradlew -Pvagrant.boxes=all -packagingTest`. You can run a particular VM with a command like `./gradlew --Pvagrant.boxes=oel-7 packagingTest`. See `./gradlew tasks` for a complete list -of available vagrant boxes for testing. It's important to know that if you -interrupt any of these Gradle commands then the boxes will remain running and -you'll have to terminate them with `./gradlew stop`. +You can choose which boxes to test by setting the `-Pvagrant.boxes` project property. All of +the valid options for this property are: + +* `sample` - The default, only chooses ubuntu-1404 and centos-7 +* List of box names, comma separated (e.g. `oel-7,fedora-26`) - Chooses exactly the boxes listed. +* `linux-all` - All linux boxes. +* `windows-all` - All Windows boxes. If there are any Windows boxes which do not +have images available when this value is provided, the build will fail. +* `all` - All boxes we test. If there are any boxes (e.g. Windows) which do not have images +available when this value is provided, the build will fail. + +For a complete list of boxes on which tests can be run, run `./gradlew :qa:vagrant:listAllBoxes`. 
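For example, the property is combined with the `packagingTest` task like this (a sketch based on the options above; the box names are only illustrations taken from the supported list):

--------------------------------------------
# default sample boxes (ubuntu-1404 and centos-7)
./gradlew packagingTest

# an explicit, comma separated list of boxes
./gradlew -Pvagrant.boxes=oel-7,fedora-26 packagingTest

# every Linux box we provide images for
./gradlew -Pvagrant.boxes=linux-all packagingTest
--------------------------------------------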
+For a list of boxes that have images available from your configuration, run +`./gradlew :qa:vagrant:listAvailableBoxes` + +Note that if you interrupt gradle in the middle of running these tasks, any boxes started +will remain running and you'll have to stop them manually with `./gradlew stop` or +`vagrant halt`. All the regular vagrant commands should just work so you can get a shell in a VM running trusty by running `vagrant up ubuntu-1404 --provider virtualbox && vagrant ssh ubuntu-1404`. -These are the linux flavors the Vagrantfile currently supports: +These are the linux flavors supported, all of which we provide images for * ubuntu-1404 aka trusty * ubuntu-1604 aka xenial @@ -364,9 +378,42 @@ quality boxes available in vagrant atlas: * sles-11 -We're missing the following because our tests are very linux/bash centric: +=== Testing packaging on Windows + +The packaging tests also support Windows Server 2012R2 and Windows Server 2016. +Unfortunately we're not able to provide boxes for them in open source use +because of licensing issues. Any Virtualbox image that has WinRM and Powershell +enabled for remote users should work. + +Testing on Windows requires the https://github.com/criteo/vagrant-winrm[vagrant-winrm] plugin. + +------------------------------------ +vagrant plugin install vagrant-winrm +------------------------------------ + +Specify the image IDs of the Windows boxes to gradle with the following project +properties. They can be set in `~/.gradle/gradle.properties` like -* Windows Server 2012 +------------------------------------ +vagrant.windows-2012r2.id=my-image-id +vagrant.windows-2016.id=another-image-id +------------------------------------ + +or passed on the command line like `-Pvagrant.windows-2012r2.id=my-image-id` +`-Pvagrant.windows-2016=another-image-id` + +These properties are required for Windows support in all gradle tasks that +handle packaging tests. Either or both may be specified. Remember that to run tests +on these boxes, the project property `vagrant.boxes` still needs to be set to a +value that will include them. + +If you're running vagrant commands outside of gradle, specify the Windows boxes +with the environment variables + +* `VAGRANT_WINDOWS_2012R2_BOX` +* `VAGRANT_WINDOWS_2016_BOX` + +=== Testing VMs are disposable It's important to think of VMs like cattle. If they become lame you just shoot them and let vagrant reprovision them. Say you've hosed your precise VM: @@ -399,54 +446,62 @@ vagrant destroy -f `vagrant up` would normally start all the VMs but we've prevented that because that'd consume a ton of ram. -== Testing scripts more directly +=== Iterating on packaging tests -In general its best to stick to testing in vagrant because the bats scripts are -destructive. When working with a single package it's generally faster to run its -tests in a tighter loop than Gradle provides. In one window: +Running the packaging tests through gradle can take a while because it will start +and stop the VM each time. You can iterate faster by keeping the VM up and running +the tests directly. --------------------------------- -./gradlew :distribution:packages:rpm:assemble --------------------------------- +The packaging tests use a random seed to determine which past version to use for +testing upgrades. 
To use a single past version fix the test seed when running +the commands below (see <>) -and in another window: +First build the packaging tests and their dependencies ----------------------------------------------------- -vagrant up centos-7 --provider virtualbox && vagrant ssh centos-7 -cd $PACKAGING_ARCHIVES -sudo -E bats $BATS_TESTS/*rpm*.bats ----------------------------------------------------- +-------------------------------------------- +./gradlew :qa:vagrant:setupPackagingTest +-------------------------------------------- -If you wanted to retest all the release artifacts on a single VM you could: +Then choose the VM you want to test on and bring it up. For example, to bring +up Debian 9 use the gradle command below. Bringing the box up with vagrant directly +may not mount the packaging test project in the right place. Once the VM is up, ssh +into it -------------------------------------------------- -./gradlew setupPackagingTest -cd qa/vagrant; vagrant up ubuntu-1404 --provider virtualbox && vagrant ssh ubuntu-1404 +-------------------------------------------- +./gradlew :qa:vagrant:vagrantDebian9#up +vagrant ssh debian-9 +-------------------------------------------- + +Now inside the VM, to run the https://github.com/sstephenson/bats[bats] packaging tests + +-------------------------------------------- cd $PACKAGING_ARCHIVES -sudo -E bats $BATS_TESTS/*.bats -------------------------------------------------- -You can also use Gradle to prepare the test environment and then starts a single VM: +# runs all bats tests +sudo bats $BATS_TESTS/*.bats -------------------------------------------------- -./gradlew vagrantFedora27#up -------------------------------------------------- +# you can also pass specific test files +sudo bats $BATS_TESTS/20_tar_package.bats $BATS_TESTS/25_tar_plugins.bats +-------------------------------------------- -Or any of vagrantCentos6#up, vagrantCentos7#up, vagrantDebian8#up, -vagrantDebian9#up, vagrantFedora26#up, vagrantFedora27#up, vagrantOel6#up, vagrantOel7#up, -vagrantOpensuse42#up,vagrantSles12#up, vagrantUbuntu1404#up, vagrantUbuntu1604#up. 
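Putting the steps above together, a typical host-side loop looks something like the sketch below. The seed value is only an illustration, and it assumes the build's usual `-Dtests.seed` property is what fixes the seed for these tasks:

--------------------------------------------
# fix the test seed so upgrade tests keep picking the same past version
./gradlew -Dtests.seed=DEADBEEF :qa:vagrant:setupPackagingTest

# bring up a single box and get a shell in it
./gradlew :qa:vagrant:vagrantDebian9#up
vagrant ssh debian-9
--------------------------------------------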
+To run the Java packaging tests, again inside the VM -Once up, you can then connect to the VM using SSH from the elasticsearch directory: +-------------------------------------------- +bash $PACKAGING_TESTS/run-tests.sh +-------------------------------------------- -------------------------------------------------- -vagrant ssh fedora-27 -------------------------------------------------- +or on Windows -Or from another directory: +-------------------------------------------- +powershell -File $Env:PACKAGING_TESTS/run-tests.ps1 +-------------------------------------------- -------------------------------------------------- -VAGRANT_CWD=/path/to/elasticsearch vagrant ssh fedora-27 -------------------------------------------------- +When you've made changes you want to test, keep the VM up and reload the tests and +distributions inside by running (on the host) + +-------------------------------------------- +./gradlew :qa:vagrant:clean :qa:vagrant:setupPackagingTest +-------------------------------------------- Note: Starting vagrant VM outside of the elasticsearch folder requires to indicates the folder that contains the Vagrantfile using the VAGRANT_CWD diff --git a/Vagrantfile b/Vagrantfile index 6761fec07dab2..1c259c1125f00 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -121,6 +121,26 @@ Vagrant.configure(2) do |config| sles_common config, box end end + + windows_2012r2_box = ENV['VAGRANT_WINDOWS_2012R2_BOX'] + if windows_2012r2_box && windows_2012r2_box.empty? == false + 'windows-2012r2'.tap do |box| + config.vm.define box, define_opts do |config| + config.vm.box = windows_2012r2_box + windows_common config, box + end + end + end + + windows_2016_box = ENV['VAGRANT_WINDOWS_2016_BOX'] + if windows_2016_box && windows_2016_box.empty? == false + 'windows-2016'.tap do |box| + config.vm.define box, define_opts do |config| + config.vm.box = windows_2016_box + windows_common config, box + end + end + end end def deb_common(config, name, extra: '') @@ -353,3 +373,22 @@ SUDOERS_VARS chmod 0440 /etc/sudoers.d/elasticsearch_vars SHELL end + +def windows_common(config, name) + config.vm.provision 'markerfile', type: 'shell', inline: <<-SHELL + $ErrorActionPreference = "Stop" + New-Item C:/is_vagrant_vm -ItemType file -Force | Out-Null + SHELL + + config.vm.provision 'set prompt', type: 'shell', inline: <<-SHELL + $ErrorActionPreference = "Stop" + $ps_prompt = 'function Prompt { "#{name}:$($ExecutionContext.SessionState.Path.CurrentLocation)>" }' + $ps_prompt | Out-File $PsHome/Microsoft.PowerShell_profile.ps1 + SHELL + + config.vm.provision 'set env variables', type: 'shell', inline: <<-SHELL + $ErrorActionPreference = "Stop" + [Environment]::SetEnvironmentVariable("PACKAGING_ARCHIVES", "C:/project/build/packaging/archives", "Machine") + [Environment]::SetEnvironmentVariable("PACKAGING_TESTS", "C:/project/build/packaging/tests", "Machine") + SHELL +end diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy index bb85359ae3f07..72d71f25f69f2 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/vagrant/VagrantTestPlugin.groovy @@ -13,10 +13,12 @@ import org.gradle.api.tasks.Delete import org.gradle.api.tasks.Exec import org.gradle.api.tasks.TaskState +import static java.util.Collections.unmodifiableList + class VagrantTestPlugin implements Plugin { - /** All available boxes **/ - 
static List BOXES = [ + /** All Linux boxes that we test. These are all always supplied **/ + static final List LINUX_BOXES = unmodifiableList([ 'centos-6', 'centos-7', 'debian-8', @@ -29,26 +31,35 @@ class VagrantTestPlugin implements Plugin { 'sles-12', 'ubuntu-1404', 'ubuntu-1604' - ] + ]) + + /** All Windows boxes that we test, which may or may not be supplied **/ + static final List WINDOWS_BOXES = unmodifiableList([ + 'windows-2012r2', + 'windows-2016' + ]) + + /** All boxes that we test, some of which may not be supplied **/ + static final List ALL_BOXES = unmodifiableList(LINUX_BOXES + WINDOWS_BOXES) /** Boxes used when sampling the tests **/ - static List SAMPLE = [ + static final List SAMPLE = unmodifiableList([ 'centos-7', - 'ubuntu-1404', - ] + 'ubuntu-1404' + ]) /** All distributions to bring into test VM, whether or not they are used **/ - static List DISTRIBUTIONS = [ + static final List DISTRIBUTIONS = unmodifiableList([ 'archives:tar', 'archives:oss-tar', 'packages:rpm', 'packages:oss-rpm', 'packages:deb', 'packages:oss-deb' - ] + ]) /** Packages onboarded for upgrade tests **/ - static List UPGRADE_FROM_ARCHIVES = ['rpm', 'deb'] + static final List UPGRADE_FROM_ARCHIVES = unmodifiableList(['rpm', 'deb']) private static final PACKAGING_CONFIGURATION = 'packaging' private static final PACKAGING_TEST_CONFIGURATION = 'packagingTest' @@ -56,11 +67,19 @@ class VagrantTestPlugin implements Plugin { private static final String BATS_TEST_COMMAND ="cd \$PACKAGING_ARCHIVES && sudo bats --tap \$BATS_TESTS/*.$BATS" private static final String PLATFORM_TEST_COMMAND ="rm -rf ~/elasticsearch && rsync -r /elasticsearch/ ~/elasticsearch && cd ~/elasticsearch && ./gradlew test integTest" + /** Boxes that have been supplied and are available for testing **/ + List availableBoxes = [] + + /** extra env vars to pass to vagrant for box configuration **/ + Map vagrantBoxEnvVars = [:] + @Override void apply(Project project) { + collectAvailableBoxes(project) + // Creates the Vagrant extension for the project - project.extensions.create('esvagrant', VagrantPropertiesExtension, listVagrantBoxes(project)) + project.extensions.create('esvagrant', VagrantPropertiesExtension, listSelectedBoxes(project)) // Add required repositories for packaging tests configurePackagingArchiveRepositories(project) @@ -73,12 +92,17 @@ class VagrantTestPlugin implements Plugin { createVagrantTasks(project) if (project.extensions.esvagrant.boxes == null || project.extensions.esvagrant.boxes.size() == 0) { - throw new InvalidUserDataException('Vagrant boxes cannot be null or empty for esvagrant') + throw new InvalidUserDataException('Must specify at least one vagrant box') } for (String box : project.extensions.esvagrant.boxes) { - if (BOXES.contains(box) == false) { - throw new InvalidUserDataException("Vagrant box [${box}] not found, available virtual machines are ${BOXES}") + if (ALL_BOXES.contains(box) == false) { + throw new InvalidUserDataException("Vagrant box [${box}] is unknown to this plugin. Valid boxes are ${ALL_BOXES}") + } + + if (availableBoxes.contains(box) == false) { + throw new InvalidUserDataException("Vagrant box [${box}] is not available because an image is not supplied for it. 
" + + "Available boxes with supplied images are ${availableBoxes}") } } @@ -86,14 +110,45 @@ class VagrantTestPlugin implements Plugin { createVagrantBoxesTasks(project) } - private List listVagrantBoxes(Project project) { + /** + * Enumerate all the boxes that we know about and could possibly choose to test + */ + private void collectAvailableBoxes(Project project) { + // these images are hardcoded in the Vagrantfile and are always available + availableBoxes.addAll(LINUX_BOXES) + + // these images need to be provided at runtime + String windows_2012r2_box = project.getProperties().get('vagrant.windows-2012r2.id') + if (windows_2012r2_box != null && windows_2012r2_box.isEmpty() == false) { + availableBoxes.add('windows-2012r2') + vagrantBoxEnvVars['VAGRANT_WINDOWS_2012R2_BOX'] = windows_2012r2_box + } + + String windows_2016_box = project.getProperties().get('vagrant.windows-2016.id') + if (windows_2016_box != null && windows_2016_box.isEmpty() == false) { + availableBoxes.add('windows-2016') + vagrantBoxEnvVars['VAGRANT_WINDOWS_2016_BOX'] = windows_2016_box + } + } + + /** + * Enumerate all the boxes that we have chosen to test + */ + private static List listSelectedBoxes(Project project) { String vagrantBoxes = project.getProperties().get('vagrant.boxes', 'sample') - if (vagrantBoxes == 'sample') { - return SAMPLE - } else if (vagrantBoxes == 'all') { - return BOXES - } else { - return vagrantBoxes.split(',') + switch (vagrantBoxes) { + case 'sample': + return SAMPLE + case 'linux-all': + return LINUX_BOXES + case 'windows-all': + return WINDOWS_BOXES + case 'all': + return ALL_BOXES + case '': + return [] + default: + return vagrantBoxes.split(',') } } @@ -184,11 +239,19 @@ class VagrantTestPlugin implements Plugin { from project.configurations[PACKAGING_TEST_CONFIGURATION] } - Task createTestRunnerScript = project.tasks.create('createTestRunnerScript', FileContentsTask) { + Task createLinuxRunnerScript = project.tasks.create('createLinuxRunnerScript', FileContentsTask) { dependsOn copyPackagingTests file "${testsDir}/run-tests.sh" contents "java -cp \"\$PACKAGING_TESTS/*\" org.junit.runner.JUnitCore ${-> project.extensions.esvagrant.testClass}" } + Task createWindowsRunnerScript = project.tasks.create('createWindowsRunnerScript', FileContentsTask) { + dependsOn copyPackagingTests + file "${testsDir}/run-tests.ps1" + contents """\ + java -cp "\$Env:PACKAGING_TESTS/*" org.junit.runner.JUnitCore ${-> project.extensions.esvagrant.testClass} + exit \$LASTEXITCODE + """ + } Task createVersionFile = project.tasks.create('createVersionFile', FileContentsTask) { dependsOn copyPackagingArchives @@ -249,20 +312,24 @@ class VagrantTestPlugin implements Plugin { } Task vagrantSetUpTask = project.tasks.create('setupPackagingTest') - vagrantSetUpTask.dependsOn 'vagrantCheckVersion' - vagrantSetUpTask.dependsOn copyPackagingArchives, copyPackagingTests, createTestRunnerScript - vagrantSetUpTask.dependsOn createVersionFile, createUpgradeFromFile, createUpgradeIsOssFile - vagrantSetUpTask.dependsOn copyBatsTests, copyBatsUtils + vagrantSetUpTask.dependsOn( + 'vagrantCheckVersion', + copyPackagingArchives, + copyPackagingTests, + createLinuxRunnerScript, + createWindowsRunnerScript, + createVersionFile, + createUpgradeFromFile, + createUpgradeIsOssFile, + copyBatsTests, + copyBatsUtils + ) } private static void createPackagingTestTask(Project project) { project.tasks.create('packagingTest') { group 'Verification' - description "Tests yum/apt packages using vagrant and bats.\n" + - " Specify the vagrant 
boxes to test using the gradle property 'vagrant.boxes'.\n" + - " 'sample' can be used to test a single yum and apt box. 'all' can be used to\n" + - " test all available boxes. The available boxes are: \n" + - " ${BOXES}" + description "Tests distribution installation on different platforms using vagrant. See TESTING.asciidoc for details." dependsOn 'vagrantCheckVersion' } } @@ -270,24 +337,49 @@ class VagrantTestPlugin implements Plugin { private static void createPlatformTestTask(Project project) { project.tasks.create('platformTest') { group 'Verification' - description "Test unit and integ tests on different platforms using vagrant.\n" + - " Specify the vagrant boxes to test using the gradle property 'vagrant.boxes'.\n" + - " 'all' can be used to test all available boxes. The available boxes are: \n" + - " ${BOXES}" + description "Test unit and integ tests on different platforms using vagrant. See TESTING.asciidoc for details. This test " + + "is unmaintained." dependsOn 'vagrantCheckVersion' } } - private static void createVagrantTasks(Project project) { + private void createBoxListTasks(Project project) { + project.tasks.create('listAllBoxes') { + group 'Verification' + description 'List all vagrant boxes which can be tested by this plugin' + doLast { + println("All vagrant boxes supported by ${project.path}") + for (String box : ALL_BOXES) { + println(box) + } + } + dependsOn 'vagrantCheckVersion' + } + + project.tasks.create('listAvailableBoxes') { + group 'Verification' + description 'List all vagrant boxes which are available for testing' + doLast { + println("All vagrant boxes available to ${project.path}") + for (String box : availableBoxes) { + println(box) + } + } + dependsOn 'vagrantCheckVersion' + } + } + + private void createVagrantTasks(Project project) { createCleanTask(project) createStopTask(project) createSmokeTestTask(project) createPrepareVagrantTestEnvTask(project) createPackagingTestTask(project) createPlatformTestTask(project) + createBoxListTasks(project) } - private static void createVagrantBoxesTasks(Project project) { + private void createVagrantBoxesTasks(Project project) { assert project.extensions.esvagrant.boxes != null assert project.tasks.stop != null @@ -320,9 +412,10 @@ class VagrantTestPlugin implements Plugin { 'VAGRANT_VAGRANTFILE' : 'Vagrantfile', 'VAGRANT_PROJECT_DIR' : "${project.projectDir.absolutePath}" ] + vagrantEnvVars.putAll(vagrantBoxEnvVars) // Each box gets it own set of tasks - for (String box : BOXES) { + for (String box : availableBoxes) { String boxTask = box.capitalize().replace('-', '') // always add a halt task for all boxes, so clean makes sure they are all shutdown @@ -363,6 +456,7 @@ class VagrantTestPlugin implements Plugin { final Task destroy = project.tasks.create("vagrant${boxTask}#destroy", LoggedExec) { commandLine "bash", "-c", "vagrant status ${box} | grep -q \"${box}\\s\\+not created\" || vagrant destroy ${box} --force" workingDir project.rootProject.rootDir + environment vagrantEnvVars } destroy.onlyIf { vagrantDestroy } update.mustRunAfter(destroy) @@ -386,37 +480,42 @@ class VagrantTestPlugin implements Plugin { environment vagrantEnvVars dependsOn up finalizedBy halt - commandLine 'vagrant', 'ssh', box, '--command', - "set -o pipefail && echo 'Hello from ${project.path}' | sed -ue 's/^/ ${box}: /'" } vagrantSmokeTest.dependsOn(smoke) - - Task batsPackagingTest = project.tasks.create("vagrant${boxTask}#batsPackagingTest", BatsOverVagrantTask) { - remoteCommand BATS_TEST_COMMAND - boxName box - environmentVars 
vagrantEnvVars - dependsOn up, setupPackagingTest - finalizedBy halt + if (LINUX_BOXES.contains(box)) { + smoke.commandLine = ['vagrant', 'ssh', box, '--command', + "set -o pipefail && echo 'Hello from ${project.path}' | sed -ue 's/^/ ${box}: /'"] + } else { + smoke.commandLine = ['vagrant', 'winrm', box, '--command', + "Write-Host ' ${box}: Hello from ${project.path}'"] } - TaskExecutionAdapter batsPackagingReproListener = createReproListener(project, batsPackagingTest.path) - batsPackagingTest.doFirst { - project.gradle.addListener(batsPackagingReproListener) - } - batsPackagingTest.doLast { - project.gradle.removeListener(batsPackagingReproListener) - } - if (project.extensions.esvagrant.boxes.contains(box)) { - packagingTest.dependsOn(batsPackagingTest) + if (LINUX_BOXES.contains(box)) { + Task batsPackagingTest = project.tasks.create("vagrant${boxTask}#batsPackagingTest", BatsOverVagrantTask) { + remoteCommand BATS_TEST_COMMAND + boxName box + environmentVars vagrantEnvVars + dependsOn up, setupPackagingTest + finalizedBy halt + } + + TaskExecutionAdapter batsPackagingReproListener = createReproListener(project, batsPackagingTest.path) + batsPackagingTest.doFirst { + project.gradle.addListener(batsPackagingReproListener) + } + batsPackagingTest.doLast { + project.gradle.removeListener(batsPackagingReproListener) + } + if (project.extensions.esvagrant.boxes.contains(box)) { + packagingTest.dependsOn(batsPackagingTest) + } } Task javaPackagingTest = project.tasks.create("vagrant${boxTask}#javaPackagingTest", VagrantCommandTask) { - command 'ssh' boxName box environmentVars vagrantEnvVars dependsOn up, setupPackagingTest finalizedBy halt - args '--command', "bash \"\$PACKAGING_TESTS/run-tests.sh\"" } // todo remove this onlyIf after all packaging tests are consolidated @@ -424,6 +523,14 @@ class VagrantTestPlugin implements Plugin { project.extensions.esvagrant.testClass != null } + if (LINUX_BOXES.contains(box)) { + javaPackagingTest.command = 'ssh' + javaPackagingTest.args = ['--command', 'bash "$PACKAGING_TESTS/run-tests.sh"'] + } else { + javaPackagingTest.command = 'winrm' + javaPackagingTest.args = ['--command', 'powershell -File "$Env:PACKAGING_TESTS/run-tests.ps1"'] + } + TaskExecutionAdapter javaPackagingReproListener = createReproListener(project, javaPackagingTest.path) javaPackagingTest.doFirst { project.gradle.addListener(javaPackagingReproListener) @@ -435,23 +542,29 @@ class VagrantTestPlugin implements Plugin { packagingTest.dependsOn(javaPackagingTest) } - Task platform = project.tasks.create("vagrant${boxTask}#platformTest", VagrantCommandTask) { - command 'ssh' - boxName box - environmentVars vagrantEnvVars - dependsOn up - finalizedBy halt - args '--command', PLATFORM_TEST_COMMAND + " -Dtests.seed=${-> project.testSeed}" - } - TaskExecutionAdapter platformReproListener = createReproListener(project, platform.path) - platform.doFirst { - project.gradle.addListener(platformReproListener) - } - platform.doLast { - project.gradle.removeListener(platformReproListener) - } - if (project.extensions.esvagrant.boxes.contains(box)) { - platformTest.dependsOn(platform) + /* + * This test is unmaintained and was created to run on Linux. 
We won't allow it to run on Windows + * until it's been brought back into maintenance + */ + if (LINUX_BOXES.contains(box)) { + Task platform = project.tasks.create("vagrant${boxTask}#platformTest", VagrantCommandTask) { + command 'ssh' + boxName box + environmentVars vagrantEnvVars + dependsOn up + finalizedBy halt + args '--command', PLATFORM_TEST_COMMAND + " -Dtests.seed=${-> project.testSeed}" + } + TaskExecutionAdapter platformReproListener = createReproListener(project, platform.path) + platform.doFirst { + project.gradle.addListener(platformReproListener) + } + platform.doLast { + project.gradle.removeListener(platformReproListener) + } + if (project.extensions.esvagrant.boxes.contains(box)) { + platformTest.dependsOn(platform) + } } } } diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle index c273e76a92aed..222de9608aeb9 100644 --- a/client/rest-high-level/build.gradle +++ b/client/rest-high-level/build.gradle @@ -40,6 +40,7 @@ dependencies { compile "org.elasticsearch.plugin:parent-join-client:${version}" compile "org.elasticsearch.plugin:aggs-matrix-stats-client:${version}" compile "org.elasticsearch.plugin:rank-eval-client:${version}" + compile "org.elasticsearch.plugin:lang-mustache-client:${version}" testCompile "org.elasticsearch.client:test:${version}" testCompile "org.elasticsearch.test:framework:${version}" diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java index f3c84db79d65f..e78e4686d6991 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/ClusterClient.java @@ -21,6 +21,8 @@ import org.apache.http.Header; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; @@ -63,4 +65,26 @@ public void putSettingsAsync(ClusterUpdateSettingsRequest clusterUpdateSettingsR restHighLevelClient.performRequestAsyncAndParseEntity(clusterUpdateSettingsRequest, RequestConverters::clusterPutSettings, ClusterUpdateSettingsResponse::fromXContent, listener, emptySet(), headers); } + + /** + * Get current tasks using the Task Management API + *
+ * See + * Task Management API on elastic.co + */ + public ListTasksResponse listTasks(ListTasksRequest request, Header... headers) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(request, RequestConverters::listTasks, ListTasksResponse::fromXContent, + emptySet(), headers); + } + + /** + * Asynchronously get current tasks using the Task Management API + *
+ * See + * Task Management API on elastic.co + */ + public void listTasksAsync(ListTasksRequest request, ActionListener listener, Header... headers) { + restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::listTasks, ListTasksResponse::fromXContent, + listener, emptySet(), headers); + } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index 2e7b4ba74cc39..a5a6b9f7bd271 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -29,7 +29,9 @@ import org.apache.http.entity.ContentType; import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; @@ -44,8 +46,8 @@ import org.elasticsearch.action.admin.indices.open.OpenIndexRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; -import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequest; @@ -80,7 +82,9 @@ import org.elasticsearch.index.VersionType; import org.elasticsearch.index.rankeval.RankEvalRequest; import org.elasticsearch.rest.action.search.RestSearchAction; +import org.elasticsearch.script.mustache.SearchTemplateRequest; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; +import org.elasticsearch.tasks.TaskId; import java.io.ByteArrayOutputStream; import java.io.IOException; @@ -458,6 +462,15 @@ static Request search(SearchRequest searchRequest) throws IOException { Request request = new Request(HttpPost.METHOD_NAME, endpoint(searchRequest.indices(), searchRequest.types(), "_search")); Params params = new Params(request); + addSearchRequestParams(params, searchRequest); + + if (searchRequest.source() != null) { + request.setEntity(createEntity(searchRequest.source(), REQUEST_BODY_CONTENT_TYPE)); + } + return request; + } + + private static void addSearchRequestParams(Params params, SearchRequest searchRequest) { params.putParam(RestSearchAction.TYPED_KEYS_PARAM, "true"); params.withRouting(searchRequest.routing()); params.withPreference(searchRequest.preference()); @@ -473,11 +486,6 @@ static Request search(SearchRequest searchRequest) throws IOException { if (searchRequest.scroll() != null) { params.putParam("scroll", searchRequest.scroll().keepAlive()); } - - if (searchRequest.source() != null) { - request.setEntity(createEntity(searchRequest.source(), REQUEST_BODY_CONTENT_TYPE)); - } - return request; } static Request 
searchScroll(SearchScrollRequest searchScrollRequest) throws IOException { @@ -507,6 +515,24 @@ static Request multiSearch(MultiSearchRequest multiSearchRequest) throws IOExcep return request; } + static Request searchTemplate(SearchTemplateRequest searchTemplateRequest) throws IOException { + Request request; + + if (searchTemplateRequest.isSimulate()) { + request = new Request(HttpGet.METHOD_NAME, "_render/template"); + } else { + SearchRequest searchRequest = searchTemplateRequest.getRequest(); + String endpoint = endpoint(searchRequest.indices(), searchRequest.types(), "_search/template"); + request = new Request(HttpGet.METHOD_NAME, endpoint); + + Params params = new Params(request); + addSearchRequestParams(params, searchRequest); + } + + request.setEntity(createEntity(searchTemplateRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + static Request existsAlias(GetAliasesRequest getAliasesRequest) { if ((getAliasesRequest.indices() == null || getAliasesRequest.indices().length == 0) && (getAliasesRequest.aliases() == null || getAliasesRequest.aliases().length == 0)) { @@ -582,6 +608,22 @@ static Request clusterPutSettings(ClusterUpdateSettingsRequest clusterUpdateSett return request; } + static Request listTasks(ListTasksRequest listTaskRequest) { + if (listTaskRequest.getTaskId() != null && listTaskRequest.getTaskId().isSet()) { + throw new IllegalArgumentException("TaskId cannot be used for list tasks request"); + } + Request request = new Request(HttpGet.METHOD_NAME, "/_tasks"); + Params params = new Params(request); + params.withTimeout(listTaskRequest.getTimeout()) + .withDetailed(listTaskRequest.getDetailed()) + .withWaitForCompletion(listTaskRequest.getWaitForCompletion()) + .withParentTaskId(listTaskRequest.getParentTaskId()) + .withNodes(listTaskRequest.getNodes()) + .withActions(listTaskRequest.getActions()) + .putParam("group_by", "none"); + return request; + } + static Request rollover(RolloverRequest rolloverRequest) throws IOException { String endpoint = new EndpointBuilder().addPathPart(rolloverRequest.getAlias()).addPathPartAsIs("_rollover") .addPathPart(rolloverRequest.getNewIndexName()).build(); @@ -656,6 +698,19 @@ static Request getRepositories(GetRepositoriesRequest getRepositoriesRequest) { return request; } + static Request createRepository(PutRepositoryRequest putRepositoryRequest) throws IOException { + String endpoint = new EndpointBuilder().addPathPart("_snapshot").addPathPart(putRepositoryRequest.name()).build(); + Request request = new Request(HttpPut.METHOD_NAME, endpoint); + + Params parameters = new Params(request); + parameters.withMasterTimeout(putRepositoryRequest.masterNodeTimeout()); + parameters.withTimeout(putRepositoryRequest.timeout()); + parameters.withVerify(putRepositoryRequest.verify()); + + request.setEntity(createEntity(putRepositoryRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + static Request putTemplate(PutIndexTemplateRequest putIndexTemplateRequest) throws IOException { String endpoint = new EndpointBuilder().addPathPartAsIs("_template").addPathPart(putIndexTemplateRequest.name()).build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); @@ -894,6 +949,48 @@ Params withPreserveExisting(boolean preserveExisting) { } return this; } + + Params withDetailed(boolean detailed) { + if (detailed) { + return putParam("detailed", Boolean.TRUE.toString()); + } + return this; + } + + Params withWaitForCompletion(boolean waitForCompletion) { + if (waitForCompletion) { + return putParam("wait_for_completion", 
Boolean.TRUE.toString()); + } + return this; + } + + Params withNodes(String[] nodes) { + if (nodes != null && nodes.length > 0) { + return putParam("nodes", String.join(",", nodes)); + } + return this; + } + + Params withActions(String[] actions) { + if (actions != null && actions.length > 0) { + return putParam("actions", String.join(",", actions)); + } + return this; + } + + Params withParentTaskId(TaskId parentTaskId) { + if (parentTaskId != null && parentTaskId.isSet()) { + return putParam("parent_task_id", parentTaskId.toString()); + } + return this; + } + + Params withVerify(boolean verify) { + if (verify) { + return putParam("verify", Boolean.TRUE.toString()); + } + return this; + } } /** diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index 1985d6bd06dd4..5dbf2709d9988 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -64,6 +64,8 @@ import org.elasticsearch.plugins.spi.NamedXContentProvider; import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.script.mustache.SearchTemplateRequest; +import org.elasticsearch.script.mustache.SearchTemplateResponse; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.bucket.adjacency.AdjacencyMatrixAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.adjacency.ParsedAdjacencyMatrix; @@ -501,6 +503,32 @@ public final void clearScrollAsync(ClearScrollRequest clearScrollRequest, listener, emptySet(), headers); } + /** + * Executes a request using the Search Template API. + * + * See Search Template API + * on elastic.co. + */ + public final SearchTemplateResponse searchTemplate(SearchTemplateRequest searchTemplateRequest, + Header... headers) throws IOException { + return performRequestAndParseEntity(searchTemplateRequest, RequestConverters::searchTemplate, + SearchTemplateResponse::fromXContent, emptySet(), headers); + } + + /** + * Asynchronously executes a request using the Search Template API + * + * See Search Template API + * on elastic.co. + */ + public final void searchTemplateAsync(SearchTemplateRequest searchTemplateRequest, + ActionListener listener, + Header... headers) { + performRequestAsyncAndParseEntity(searchTemplateRequest, RequestConverters::searchTemplate, + SearchTemplateResponse::fromXContent, listener, emptySet(), headers); + } + + /** * Executes a request using the Ranking Evaluation API. 
* diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java index e526fbe7164f9..aec94586bee30 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java @@ -23,8 +23,8 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; -import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; -import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; import java.io.IOException; @@ -67,4 +67,27 @@ public void getRepositoriesAsync(GetRepositoriesRequest getRepositoriesRequest, restHighLevelClient.performRequestAsyncAndParseEntity(getRepositoriesRequest, RequestConverters::getRepositories, GetRepositoriesResponse::fromXContent, listener, emptySet(), headers); } + + /** + * Creates a snapshot repository. + *
+ * See Snapshot and Restore + * API on elastic.co + */ + public PutRepositoryResponse createRepository(PutRepositoryRequest putRepositoryRequest, Header... headers) throws IOException { + return restHighLevelClient.performRequestAndParseEntity(putRepositoryRequest, RequestConverters::createRepository, + PutRepositoryResponse::fromXContent, emptySet(), headers); + } + + /** + * Asynchronously creates a snapshot repository. + *
+ * See Snapshot and Restore + * API on elastic.co + */ + public void createRepositoryAsync(PutRepositoryRequest putRepositoryRequest, + ActionListener listener, Header... headers) { + restHighLevelClient.performRequestAsyncAndParseEntity(putRepositoryRequest, RequestConverters::createRepository, + PutRepositoryResponse::fromXContent, listener, emptySet(), headers); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java index 9314bb2e36cea..fa3086442f528 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java @@ -20,6 +20,9 @@ package org.elasticsearch.client; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; @@ -29,13 +32,16 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.tasks.TaskInfo; import java.io.IOException; import java.util.HashMap; import java.util.Map; +import static java.util.Collections.emptyList; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -105,4 +111,29 @@ public void testClusterUpdateSettingNonExistent() { assertThat(exception.getMessage(), equalTo( "Elasticsearch exception [type=illegal_argument_exception, reason=transient setting [" + setting + "], not recognized]")); } + + public void testListTasks() throws IOException { + ListTasksRequest request = new ListTasksRequest(); + ListTasksResponse response = execute(request, highLevelClient().cluster()::listTasks, highLevelClient().cluster()::listTasksAsync); + + assertThat(response, notNullValue()); + assertThat(response.getNodeFailures(), equalTo(emptyList())); + assertThat(response.getTaskFailures(), equalTo(emptyList())); + // It's possible that there are other tasks except 'cluster:monitor/tasks/lists[n]' and 'action":"cluster:monitor/tasks/lists' + assertThat(response.getTasks().size(), greaterThanOrEqualTo(2)); + boolean listTasksFound = false; + for (TaskGroup taskGroup : response.getTaskGroups()) { + TaskInfo parent = taskGroup.getTaskInfo(); + if ("cluster:monitor/tasks/lists".equals(parent.getAction())) { + assertThat(taskGroup.getChildTasks().size(), equalTo(1)); + TaskGroup childGroup = taskGroup.getChildTasks().iterator().next(); + assertThat(childGroup.getChildTasks().isEmpty(), equalTo(true)); + TaskInfo child = childGroup.getTaskInfo(); + assertThat(child.getAction(), equalTo("cluster:monitor/tasks/lists[n]")); + assertThat(child.getParentTaskId(), equalTo(parent.getTaskId())); + listTasksFound = true; + } + } + assertTrue("List tasks were not found", listTasksFound); + } } diff --git 
a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index 2d4ef8b6413d9..4a0276e74d228 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -29,7 +29,9 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; @@ -77,9 +79,11 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -94,7 +98,10 @@ import org.elasticsearch.index.rankeval.RankEvalSpec; import org.elasticsearch.index.rankeval.RatedRequest; import org.elasticsearch.index.rankeval.RestRankEvalAction; +import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.rest.action.search.RestSearchAction; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.script.mustache.SearchTemplateRequest; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.support.ValueType; @@ -105,11 +112,13 @@ import org.elasticsearch.search.rescore.QueryRescorerBuilder; import org.elasticsearch.search.suggest.SuggestBuilder; import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder; +import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.RandomObjects; import java.io.IOException; import java.io.InputStream; +import java.nio.file.Path; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -135,6 +144,7 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.hasEntry; import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; public class RequestConvertersTests extends ESTestCase { @@ -181,8 +191,7 @@ public void testMultiGet() throws IOException { int numberOfRequests = randomIntBetween(0, 32); for (int i = 0; i < numberOfRequests; i++) { - MultiGetRequest.Item item = - new MultiGetRequest.Item(randomAlphaOfLength(4), randomAlphaOfLength(4), randomAlphaOfLength(4)); + MultiGetRequest.Item item = new MultiGetRequest.Item(randomAlphaOfLength(4), randomAlphaOfLength(4), randomAlphaOfLength(4)); if (randomBoolean()) { item.routing(randomAlphaOfLength(4)); } @@ 
-261,7 +270,7 @@ public void testIndicesExist() { public void testIndicesExistEmptyIndices() { expectThrows(IllegalArgumentException.class, () -> RequestConverters.indicesExist(new GetIndexRequest())); - expectThrows(IllegalArgumentException.class, () -> RequestConverters.indicesExist(new GetIndexRequest().indices((String[])null))); + expectThrows(IllegalArgumentException.class, () -> RequestConverters.indicesExist(new GetIndexRequest().indices((String[]) null))); } private static void getAndExistsTest(Function requestConverter, String method) { @@ -415,7 +424,8 @@ public void testGetSettings() throws IOException { setRandomLocal(getSettingsRequest, expectedParams); if (randomBoolean()) { - //the request object will not have include_defaults present unless it is set to true + // the request object will not have include_defaults present unless it is set to + // true getSettingsRequest.includeDefaults(randomBoolean()); if (getSettingsRequest.includeDefaults()) { expectedParams.put("include_defaults", Boolean.toString(true)); @@ -959,22 +969,21 @@ public void testBulkWithDifferentContentTypes() throws IOException { bulkRequest.add(new IndexRequest("index", "type", "0").source(singletonMap("field", "value"), XContentType.SMILE)); bulkRequest.add(new IndexRequest("index", "type", "1").source(singletonMap("field", "value"), XContentType.JSON)); IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> RequestConverters.bulk(bulkRequest)); - assertEquals("Mismatching content-type found for request with content-type [JSON], " + - "previous requests have content-type [SMILE]", exception.getMessage()); + assertEquals( + "Mismatching content-type found for request with content-type [JSON], " + "previous requests have content-type [SMILE]", + exception.getMessage()); } { BulkRequest bulkRequest = new BulkRequest(); - bulkRequest.add(new IndexRequest("index", "type", "0") - .source(singletonMap("field", "value"), XContentType.JSON)); - bulkRequest.add(new IndexRequest("index", "type", "1") - .source(singletonMap("field", "value"), XContentType.JSON)); + bulkRequest.add(new IndexRequest("index", "type", "0").source(singletonMap("field", "value"), XContentType.JSON)); + bulkRequest.add(new IndexRequest("index", "type", "1").source(singletonMap("field", "value"), XContentType.JSON)); bulkRequest.add(new UpdateRequest("index", "type", "2") .doc(new IndexRequest().source(singletonMap("field", "value"), XContentType.JSON)) - .upsert(new IndexRequest().source(singletonMap("field", "value"), XContentType.SMILE)) - ); + .upsert(new IndexRequest().source(singletonMap("field", "value"), XContentType.SMILE))); IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> RequestConverters.bulk(bulkRequest)); - assertEquals("Mismatching content-type found for request with content-type [SMILE], " + - "previous requests have content-type [JSON]", exception.getMessage()); + assertEquals( + "Mismatching content-type found for request with content-type [SMILE], " + "previous requests have content-type [JSON]", + exception.getMessage()); } { XContentType xContentType = randomFrom(XContentType.CBOR, XContentType.YAML); @@ -1011,42 +1020,14 @@ public void testSearch() throws Exception { searchRequest.types(types); Map expectedParams = new HashMap<>(); - expectedParams.put(RestSearchAction.TYPED_KEYS_PARAM, "true"); - if (randomBoolean()) { - searchRequest.routing(randomAlphaOfLengthBetween(3, 10)); - expectedParams.put("routing", searchRequest.routing()); - } - if 
(randomBoolean()) { - searchRequest.preference(randomAlphaOfLengthBetween(3, 10)); - expectedParams.put("preference", searchRequest.preference()); - } - if (randomBoolean()) { - searchRequest.searchType(randomFrom(SearchType.values())); - } - expectedParams.put("search_type", searchRequest.searchType().name().toLowerCase(Locale.ROOT)); - if (randomBoolean()) { - searchRequest.requestCache(randomBoolean()); - expectedParams.put("request_cache", Boolean.toString(searchRequest.requestCache())); - } - if (randomBoolean()) { - searchRequest.allowPartialSearchResults(randomBoolean()); - expectedParams.put("allow_partial_search_results", Boolean.toString(searchRequest.allowPartialSearchResults())); - } - if (randomBoolean()) { - searchRequest.setBatchedReduceSize(randomIntBetween(2, Integer.MAX_VALUE)); - } - expectedParams.put("batched_reduce_size", Integer.toString(searchRequest.getBatchedReduceSize())); - if (randomBoolean()) { - searchRequest.scroll(randomTimeValue()); - expectedParams.put("scroll", searchRequest.scroll().keepAlive().getStringRep()); - } - + setRandomSearchParams(searchRequest, expectedParams); setRandomIndicesOptions(searchRequest::indicesOptions, searchRequest::indicesOptions, expectedParams); SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); - //rarely skip setting the search source completely + // rarely skip setting the search source completely if (frequently()) { - //frequently set the search source to have some content, otherwise leave it empty but still set it + // frequently set the search source to have some content, otherwise leave it + // empty but still set it if (frequently()) { if (randomBoolean()) { searchSourceBuilder.size(randomIntBetween(0, Integer.MAX_VALUE)); @@ -1116,7 +1097,8 @@ public void testMultiSearch() throws IOException { MultiSearchRequest multiSearchRequest = new MultiSearchRequest(); for (int i = 0; i < numberOfSearchRequests; i++) { SearchRequest searchRequest = randomSearchRequest(() -> { - // No need to return a very complex SearchSourceBuilder here, that is tested elsewhere + // No need to return a very complex SearchSourceBuilder here, that is tested + // elsewhere SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); searchSourceBuilder.from(randomInt(10)); searchSourceBuilder.size(randomIntBetween(20, 100)); @@ -1124,14 +1106,13 @@ public void testMultiSearch() throws IOException { }); // scroll is not supported in the current msearch api, so unset it: searchRequest.scroll((Scroll) null); - // only expand_wildcards, ignore_unavailable and allow_no_indices can be specified from msearch api, so unset other options: + // only expand_wildcards, ignore_unavailable and allow_no_indices can be + // specified from msearch api, so unset other options: IndicesOptions randomlyGenerated = searchRequest.indicesOptions(); IndicesOptions msearchDefault = new MultiSearchRequest().indicesOptions(); - searchRequest.indicesOptions(IndicesOptions.fromOptions( - randomlyGenerated.ignoreUnavailable(), randomlyGenerated.allowNoIndices(), randomlyGenerated.expandWildcardsOpen(), - randomlyGenerated.expandWildcardsClosed(), msearchDefault.allowAliasesToMultipleIndices(), - msearchDefault.forbidClosedIndices(), msearchDefault.ignoreAliases() - )); + searchRequest.indicesOptions(IndicesOptions.fromOptions(randomlyGenerated.ignoreUnavailable(), + randomlyGenerated.allowNoIndices(), randomlyGenerated.expandWildcardsOpen(), randomlyGenerated.expandWildcardsClosed(), + msearchDefault.allowAliasesToMultipleIndices(), 
msearchDefault.forbidClosedIndices(), msearchDefault.ignoreAliases())); multiSearchRequest.add(searchRequest); } @@ -1156,8 +1137,8 @@ public void testMultiSearch() throws IOException { requests.add(searchRequest); }; MultiSearchRequest.readMultiLineFormat(new BytesArray(EntityUtils.toByteArray(request.getEntity())), - REQUEST_BODY_CONTENT_TYPE.xContent(), consumer, null, multiSearchRequest.indicesOptions(), null, null, - null, xContentRegistry(), true); + REQUEST_BODY_CONTENT_TYPE.xContent(), consumer, null, multiSearchRequest.indicesOptions(), null, null, null, + xContentRegistry(), true); assertEquals(requests, multiSearchRequest.requests()); } @@ -1189,11 +1170,70 @@ public void testClearScroll() throws IOException { assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); } + public void testSearchTemplate() throws Exception { + // Create a random request. + String[] indices = randomIndicesNames(0, 5); + SearchRequest searchRequest = new SearchRequest(indices); + + Map expectedParams = new HashMap<>(); + setRandomSearchParams(searchRequest, expectedParams); + setRandomIndicesOptions(searchRequest::indicesOptions, searchRequest::indicesOptions, expectedParams); + + SearchTemplateRequest searchTemplateRequest = new SearchTemplateRequest(searchRequest); + + searchTemplateRequest.setScript("{\"query\": { \"match\" : { \"{{field}}\" : \"{{value}}\" }}}"); + searchTemplateRequest.setScriptType(ScriptType.INLINE); + searchTemplateRequest.setProfile(randomBoolean()); + + Map scriptParams = new HashMap<>(); + scriptParams.put("field", "name"); + scriptParams.put("value", "soren"); + searchTemplateRequest.setScriptParams(scriptParams); + + // Verify that the resulting REST request looks as expected. + Request request = RequestConverters.searchTemplate(searchTemplateRequest); + StringJoiner endpoint = new StringJoiner("/", "/", ""); + String index = String.join(",", indices); + if (Strings.hasLength(index)) { + endpoint.add(index); + } + endpoint.add("_search/template"); + + assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + assertEquals(endpoint.toString(), request.getEndpoint()); + assertEquals(expectedParams, request.getParameters()); + assertToXContentBody(searchTemplateRequest, request.getEntity()); + } + + public void testRenderSearchTemplate() throws Exception { + // Create a simple request. + SearchTemplateRequest searchTemplateRequest = new SearchTemplateRequest(); + searchTemplateRequest.setSimulate(true); // Setting simulate true means the template should only be rendered. + + searchTemplateRequest.setScript("template1"); + searchTemplateRequest.setScriptType(ScriptType.STORED); + searchTemplateRequest.setProfile(randomBoolean()); + + Map scriptParams = new HashMap<>(); + scriptParams.put("field", "name"); + scriptParams.put("value", "soren"); + searchTemplateRequest.setScriptParams(scriptParams); + + // Verify that the resulting REST request looks as expected. + Request request = RequestConverters.searchTemplate(searchTemplateRequest); + String endpoint = "_render/template"; + + assertEquals(HttpGet.METHOD_NAME, request.getMethod()); + assertEquals(endpoint, request.getEndpoint()); + assertEquals(Collections.emptyMap(), request.getParameters()); + assertToXContentBody(searchTemplateRequest, request.getEntity()); + } + public void testExistsAlias() { GetAliasesRequest getAliasesRequest = new GetAliasesRequest(); String[] indices = randomBoolean() ? 
null : randomIndicesNames(0, 5); getAliasesRequest.indices(indices); - //the HEAD endpoint requires at least an alias or an index + // the HEAD endpoint requires at least an alias or an index boolean hasIndices = indices != null && indices.length > 0; String[] aliases; if (hasIndices) { @@ -1224,15 +1264,15 @@ public void testExistsAlias() { public void testExistsAliasNoAliasNoIndex() { { GetAliasesRequest getAliasesRequest = new GetAliasesRequest(); - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> - RequestConverters.existsAlias(getAliasesRequest)); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, + () -> RequestConverters.existsAlias(getAliasesRequest)); assertEquals("existsAlias requires at least an alias or an index", iae.getMessage()); } { - GetAliasesRequest getAliasesRequest = new GetAliasesRequest((String[])null); - getAliasesRequest.indices((String[])null); - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> - RequestConverters.existsAlias(getAliasesRequest)); + GetAliasesRequest getAliasesRequest = new GetAliasesRequest((String[]) null); + getAliasesRequest.indices((String[]) null); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, + () -> RequestConverters.existsAlias(getAliasesRequest)); assertEquals("existsAlias requires at least an alias or an index", iae.getMessage()); } } @@ -1242,14 +1282,10 @@ public void testFieldCaps() { String[] indices = randomIndicesNames(0, 5); String[] fields = generateRandomStringArray(5, 10, false, false); - FieldCapabilitiesRequest fieldCapabilitiesRequest = new FieldCapabilitiesRequest() - .indices(indices) - .fields(fields); + FieldCapabilitiesRequest fieldCapabilitiesRequest = new FieldCapabilitiesRequest().indices(indices).fields(fields); Map indicesOptionsParams = new HashMap<>(); - setRandomIndicesOptions(fieldCapabilitiesRequest::indicesOptions, - fieldCapabilitiesRequest::indicesOptions, - indicesOptionsParams); + setRandomIndicesOptions(fieldCapabilitiesRequest::indicesOptions, fieldCapabilitiesRequest::indicesOptions, indicesOptionsParams); Request request = RequestConverters.fieldCaps(fieldCapabilitiesRequest); @@ -1264,12 +1300,13 @@ public void testFieldCaps() { assertEquals(endpoint.toString(), request.getEndpoint()); assertEquals(4, request.getParameters().size()); - // Note that we don't check the field param value explicitly, as field names are passed through - // a hash set before being added to the request, and can appear in a non-deterministic order. + // Note that we don't check the field param value explicitly, as field names are + // passed through + // a hash set before being added to the request, and can appear in a + // non-deterministic order. 
assertThat(request.getParameters(), hasKey("fields")); String[] requestFields = Strings.splitStringByCommaToArray(request.getParameters().get("fields")); - assertEquals(new HashSet<>(Arrays.asList(fields)), - new HashSet<>(Arrays.asList(requestFields))); + assertEquals(new HashSet<>(Arrays.asList(fields)), new HashSet<>(Arrays.asList(requestFields))); for (Map.Entry param : indicesOptionsParams.entrySet()) { assertThat(request.getParameters(), hasEntry(param.getKey(), param.getValue())); @@ -1428,6 +1465,66 @@ public void testIndexPutSettings() throws IOException { assertEquals(expectedParams, request.getParameters()); } + public void testListTasks() { + { + ListTasksRequest request = new ListTasksRequest(); + Map expectedParams = new HashMap<>(); + if (randomBoolean()) { + request.setDetailed(randomBoolean()); + if (request.getDetailed()) { + expectedParams.put("detailed", "true"); + } + } + if (randomBoolean()) { + request.setWaitForCompletion(randomBoolean()); + if (request.getWaitForCompletion()) { + expectedParams.put("wait_for_completion", "true"); + } + } + if (randomBoolean()) { + String timeout = randomTimeValue(); + request.setTimeout(timeout); + expectedParams.put("timeout", timeout); + } + if (randomBoolean()) { + if (randomBoolean()) { + TaskId taskId = new TaskId(randomAlphaOfLength(5), randomNonNegativeLong()); + request.setParentTaskId(taskId); + expectedParams.put("parent_task_id", taskId.toString()); + } else { + request.setParentTask(TaskId.EMPTY_TASK_ID); + } + } + if (randomBoolean()) { + String[] nodes = generateRandomStringArray(10, 8, false); + request.setNodes(nodes); + if (nodes.length > 0) { + expectedParams.put("nodes", String.join(",", nodes)); + } + } + if (randomBoolean()) { + String[] actions = generateRandomStringArray(10, 8, false); + request.setActions(actions); + if (actions.length > 0) { + expectedParams.put("actions", String.join(",", actions)); + } + } + expectedParams.put("group_by", "none"); + Request httpRequest = RequestConverters.listTasks(request); + assertThat(httpRequest, notNullValue()); + assertThat(httpRequest.getMethod(), equalTo(HttpGet.METHOD_NAME)); + assertThat(httpRequest.getEntity(), nullValue()); + assertThat(httpRequest.getEndpoint(), equalTo("/_tasks")); + assertThat(httpRequest.getParameters(), equalTo(expectedParams)); + } + { + ListTasksRequest request = new ListTasksRequest(); + request.setTaskId(new TaskId(randomAlphaOfLength(5), randomNonNegativeLong())); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> RequestConverters.listTasks(request)); + assertEquals("TaskId cannot be used for list tasks request", exception.getMessage()); + } + } + public void testGetRepositories() { Map expectedParams = new HashMap<>(); StringBuilder endpoint = new StringBuilder("/_snapshot"); @@ -1437,7 +1534,7 @@ public void testGetRepositories() { setRandomLocal(getRepositoriesRequest, expectedParams); if (randomBoolean()) { - String[] entries = new String[] {"a", "b", "c"}; + String[] entries = new String[] { "a", "b", "c" }; getRepositoriesRequest.repositories(entries); endpoint.append("/" + String.join(",", entries)); } @@ -1448,6 +1545,27 @@ public void testGetRepositories() { assertThat(expectedParams, equalTo(request.getParameters())); } + public void testCreateRepository() throws IOException { + String repository = "repo"; + String endpoint = "/_snapshot/" + repository; + Path repositoryLocation = PathUtils.get("."); + PutRepositoryRequest putRepositoryRequest = new PutRepositoryRequest(repository); + 
putRepositoryRequest.type(FsRepository.TYPE); + putRepositoryRequest.verify(randomBoolean()); + + putRepositoryRequest.settings( + Settings.builder() + .put(FsRepository.LOCATION_SETTING.getKey(), repositoryLocation) + .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean()) + .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES) + .build()); + + Request request = RequestConverters.createRepository(putRepositoryRequest); + assertThat(endpoint, equalTo(request.getEndpoint())); + assertThat(HttpPut.METHOD_NAME, equalTo(request.getMethod())); + assertToXContentBody(putRepositoryRequest, request.getEntity()); + } + public void testPutTemplateRequest() throws Exception { Map names = new HashMap<>(); names.put("log", "log"); @@ -1455,9 +1573,8 @@ public void testPutTemplateRequest() throws Exception { names.put("-#template", "-%23template"); names.put("foo^bar", "foo%5Ebar"); - PutIndexTemplateRequest putTemplateRequest = new PutIndexTemplateRequest() - .name(randomFrom(names.keySet())) - .patterns(Arrays.asList(generateRandomStringArray(20, 100, false, false))); + PutIndexTemplateRequest putTemplateRequest = new PutIndexTemplateRequest().name(randomFrom(names.keySet())) + .patterns(Arrays.asList(generateRandomStringArray(20, 100, false, false))); if (randomBoolean()) { putTemplateRequest.order(randomInt()); } @@ -1514,14 +1631,12 @@ public void testEndpointBuilder() { assertEquals("/a/b", endpointBuilder.build()); } { - EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a").addPathPart("b") - .addPathPartAsIs("_create"); + EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a").addPathPart("b").addPathPartAsIs("_create"); assertEquals("/a/b/_create", endpointBuilder.build()); } { - EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a", "b", "c") - .addPathPartAsIs("_create"); + EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a", "b", "c").addPathPartAsIs("_create"); assertEquals("/a/b/c/_create", endpointBuilder.build()); } { @@ -1580,13 +1695,12 @@ public void testEndpointBuilderEncodeParts() { assertEquals("/foo%5Ebar", endpointBuilder.build()); } { - EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("cluster1:index1,index2") - .addPathPartAsIs("_search"); + EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("cluster1:index1,index2").addPathPartAsIs("_search"); assertEquals("/cluster1:index1,index2/_search", endpointBuilder.build()); } { - EndpointBuilder endpointBuilder = new EndpointBuilder() - .addCommaSeparatedPathParts(new String[]{"index1", "index2"}).addPathPartAsIs("cache/clear"); + EndpointBuilder endpointBuilder = new EndpointBuilder().addCommaSeparatedPathParts(new String[] { "index1", "index2" }) + .addPathPartAsIs("cache/clear"); assertEquals("/index1,index2/cache/clear", endpointBuilder.build()); } } @@ -1594,12 +1708,12 @@ public void testEndpointBuilderEncodeParts() { public void testEndpoint() { assertEquals("/index/type/id", RequestConverters.endpoint("index", "type", "id")); assertEquals("/index/type/id/_endpoint", RequestConverters.endpoint("index", "type", "id", "_endpoint")); - assertEquals("/index1,index2", RequestConverters.endpoint(new String[]{"index1", "index2"})); - assertEquals("/index1,index2/_endpoint", RequestConverters.endpoint(new String[]{"index1", "index2"}, "_endpoint")); - assertEquals("/index1,index2/type1,type2/_endpoint", RequestConverters.endpoint(new String[]{"index1", "index2"}, - new 
String[]{"type1", "type2"}, "_endpoint")); - assertEquals("/index1,index2/_endpoint/suffix1,suffix2", RequestConverters.endpoint(new String[]{"index1", "index2"}, - "_endpoint", new String[]{"suffix1", "suffix2"})); + assertEquals("/index1,index2", RequestConverters.endpoint(new String[] { "index1", "index2" })); + assertEquals("/index1,index2/_endpoint", RequestConverters.endpoint(new String[] { "index1", "index2" }, "_endpoint")); + assertEquals("/index1,index2/type1,type2/_endpoint", + RequestConverters.endpoint(new String[] { "index1", "index2" }, new String[] { "type1", "type2" }, "_endpoint")); + assertEquals("/index1,index2/_endpoint/suffix1,suffix2", + RequestConverters.endpoint(new String[] { "index1", "index2" }, "_endpoint", new String[] { "suffix1", "suffix2" })); } public void testCreateContentType() { @@ -1615,20 +1729,22 @@ public void testEnforceSameContentType() { XContentType bulkContentType = randomBoolean() ? xContentType : null; - IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> - enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), XContentType.CBOR), bulkContentType)); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), XContentType.CBOR), + bulkContentType)); assertEquals("Unsupported content-type found for request with content-type [CBOR], only JSON and SMILE are supported", exception.getMessage()); - exception = expectThrows(IllegalArgumentException.class, () -> - enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), XContentType.YAML), bulkContentType)); + exception = expectThrows(IllegalArgumentException.class, + () -> enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), XContentType.YAML), + bulkContentType)); assertEquals("Unsupported content-type found for request with content-type [YAML], only JSON and SMILE are supported", exception.getMessage()); XContentType requestContentType = xContentType == XContentType.JSON ? 
XContentType.SMILE : XContentType.JSON; - exception = expectThrows(IllegalArgumentException.class, () -> - enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), requestContentType), xContentType)); + exception = expectThrows(IllegalArgumentException.class, + () -> enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), requestContentType), xContentType)); assertEquals("Mismatching content-type found for request with content-type [" + requestContentType + "], " + "previous requests have content-type [" + xContentType + "]", exception.getMessage()); } @@ -1662,12 +1778,44 @@ private static void randomizeFetchSourceContextParams(Consumer expectedParams) { + expectedParams.put(RestSearchAction.TYPED_KEYS_PARAM, "true"); + if (randomBoolean()) { + searchRequest.routing(randomAlphaOfLengthBetween(3, 10)); + expectedParams.put("routing", searchRequest.routing()); + } + if (randomBoolean()) { + searchRequest.preference(randomAlphaOfLengthBetween(3, 10)); + expectedParams.put("preference", searchRequest.preference()); + } + if (randomBoolean()) { + searchRequest.searchType(randomFrom(SearchType.values())); + } + expectedParams.put("search_type", searchRequest.searchType().name().toLowerCase(Locale.ROOT)); + if (randomBoolean()) { + searchRequest.requestCache(randomBoolean()); + expectedParams.put("request_cache", Boolean.toString(searchRequest.requestCache())); + } + if (randomBoolean()) { + searchRequest.allowPartialSearchResults(randomBoolean()); + expectedParams.put("allow_partial_search_results", Boolean.toString(searchRequest.allowPartialSearchResults())); + } + if (randomBoolean()) { + searchRequest.setBatchedReduceSize(randomIntBetween(2, Integer.MAX_VALUE)); + } + expectedParams.put("batched_reduce_size", Integer.toString(searchRequest.getBatchedReduceSize())); + if (randomBoolean()) { + searchRequest.scroll(randomTimeValue()); + expectedParams.put("scroll", searchRequest.scroll().keepAlive().getStringRep()); + } + } + private static void setRandomIndicesOptions(Consumer setter, Supplier getter, - Map expectedParams) { + Map expectedParams) { if (randomBoolean()) { - setter.accept(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), - randomBoolean())); + setter.accept(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); } expectedParams.put("ignore_unavailable", Boolean.toString(getter.get().ignoreUnavailable())); expectedParams.put("allow_no_indices", Boolean.toString(getter.get().allowNoIndices())); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java index 549b4ce0a85c5..e147642fc73bd 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SearchIT.java @@ -38,8 +38,11 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.MatchQueryBuilder; import org.elasticsearch.index.query.ScriptQueryBuilder; import 
org.elasticsearch.index.query.TermsQueryBuilder; @@ -48,6 +51,8 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptType; +import org.elasticsearch.script.mustache.SearchTemplateRequest; +import org.elasticsearch.script.mustache.SearchTemplateResponse; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.bucket.range.Range; @@ -69,10 +74,12 @@ import java.io.IOException; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; import static org.hamcrest.Matchers.both; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.either; @@ -733,6 +740,103 @@ public void testMultiSearch_failure() throws Exception { assertThat(multiSearchResponse.getResponses()[1].getResponse(), nullValue()); } + public void testSearchTemplate() throws IOException { + SearchTemplateRequest searchTemplateRequest = new SearchTemplateRequest(); + searchTemplateRequest.setRequest(new SearchRequest("index")); + + searchTemplateRequest.setScriptType(ScriptType.INLINE); + searchTemplateRequest.setScript( + "{" + + " \"query\": {" + + " \"match\": {" + + " \"num\": {{number}}" + + " }" + + " }" + + "}"); + + Map scriptParams = new HashMap<>(); + scriptParams.put("number", 10); + searchTemplateRequest.setScriptParams(scriptParams); + + searchTemplateRequest.setExplain(true); + searchTemplateRequest.setProfile(true); + + SearchTemplateResponse searchTemplateResponse = execute(searchTemplateRequest, + highLevelClient()::searchTemplate, + highLevelClient()::searchTemplateAsync); + + assertNull(searchTemplateResponse.getSource()); + + SearchResponse searchResponse = searchTemplateResponse.getResponse(); + assertNotNull(searchResponse); + + assertEquals(1, searchResponse.getHits().totalHits); + assertEquals(1, searchResponse.getHits().getHits().length); + assertThat(searchResponse.getHits().getMaxScore(), greaterThan(0f)); + + SearchHit hit = searchResponse.getHits().getHits()[0]; + assertNotNull(hit.getExplanation()); + + assertFalse(searchResponse.getProfileResults().isEmpty()); + } + + public void testNonExistentSearchTemplate() { + SearchTemplateRequest searchTemplateRequest = new SearchTemplateRequest(); + searchTemplateRequest.setRequest(new SearchRequest("index")); + + searchTemplateRequest.setScriptType(ScriptType.STORED); + searchTemplateRequest.setScript("non-existent"); + searchTemplateRequest.setScriptParams(Collections.emptyMap()); + + ElasticsearchStatusException exception = expectThrows(ElasticsearchStatusException.class, + () -> execute(searchTemplateRequest, + highLevelClient()::searchTemplate, + highLevelClient()::searchTemplateAsync)); + + assertEquals(RestStatus.NOT_FOUND, exception.status()); + } + + public void testRenderSearchTemplate() throws IOException { + SearchTemplateRequest searchTemplateRequest = new SearchTemplateRequest(); + + searchTemplateRequest.setScriptType(ScriptType.INLINE); + searchTemplateRequest.setScript( + "{" + + " \"query\": {" + + " \"match\": {" + + " \"num\": {{number}}" + + " }" + + " }" + + "}"); + + Map scriptParams = new HashMap<>(); + scriptParams.put("number", 10); + searchTemplateRequest.setScriptParams(scriptParams); + + // Setting 
simulate true causes the template to only be rendered. + searchTemplateRequest.setSimulate(true); + + SearchTemplateResponse searchTemplateResponse = execute(searchTemplateRequest, + highLevelClient()::searchTemplate, + highLevelClient()::searchTemplateAsync); + assertNull(searchTemplateResponse.getResponse()); + + BytesReference expectedSource = BytesReference.bytes( + XContentFactory.jsonBuilder() + .startObject() + .startObject("query") + .startObject("match") + .field("num", 10) + .endObject() + .endObject() + .endObject()); + + BytesReference actualSource = searchTemplateResponse.getSource(); + assertNotNull(actualSource); + + assertToXContentEquivalent(expectedSource, actualSource, XContentType.JSON); + } + public void testFieldCaps() throws IOException { FieldCapabilitiesRequest request = new FieldCapabilitiesRequest() .indices("index1", "index2") diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java index ab2c632bfeb58..1d0ea953cd5c1 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java @@ -19,56 +19,56 @@ package org.elasticsearch.client; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.rest.RestStatus; import java.io.IOException; -import java.util.Collections; import static org.hamcrest.Matchers.equalTo; public class SnapshotIT extends ESRestHighLevelClientTestCase { - public void testModulesGetRepositoriesUsingParams() throws IOException { - String repository = "test"; - String repositorySettings = "{\"type\":\"fs\", \"settings\":{\"location\": \".\"}}"; - highLevelClient().getLowLevelClient().performRequest("put", "_snapshot/" + repository, Collections.emptyMap(), - new StringEntity(repositorySettings, ContentType.APPLICATION_JSON)); - - highLevelClient().getLowLevelClient().performRequest("put", "_snapshot/" + repository + "_other", Collections.emptyMap(), - new StringEntity(repositorySettings, ContentType.APPLICATION_JSON)); + private PutRepositoryResponse createTestRepository(String repository, String type, String settings) throws IOException { + PutRepositoryRequest request = new PutRepositoryRequest(repository); + request.settings(settings, XContentType.JSON); + request.type(type); + return execute(request, highLevelClient().snapshot()::createRepository, + highLevelClient().snapshot()::createRepositoryAsync); - { - GetRepositoriesRequest request = new GetRepositoriesRequest(); - request.repositories(new String[]{repository}); - GetRepositoriesResponse response = execute(request, highLevelClient().snapshot()::getRepositories, - highLevelClient().snapshot()::getRepositoriesAsync); - assertThat(1, equalTo(response.repositories().size())); - } - { - GetRepositoriesRequest request = new GetRepositoriesRequest(); - GetRepositoriesResponse response = execute(request, 
highLevelClient().snapshot()::getRepositories, - highLevelClient().snapshot()::getRepositoriesAsync); - assertThat(2, equalTo(response.repositories().size())); - } } - public void testModulesGetDefaultRepositories() throws IOException { - String repositorySettings = "{\"type\":\"fs\", \"settings\":{\"location\": \".\"}}"; - GetRepositoriesRequest request = new GetRepositoriesRequest(); + public void testCreateRepository() throws IOException { + PutRepositoryResponse response = createTestRepository("test", FsRepository.TYPE, "{\"location\": \".\"}"); + assertTrue(response.isAcknowledged()); + } - highLevelClient().getLowLevelClient().performRequest("put", "_snapshot/test", Collections.emptyMap(), - new StringEntity(repositorySettings, ContentType.APPLICATION_JSON)); + public void testModulesGetRepositoriesUsingParams() throws IOException { + String testRepository = "test"; + assertTrue(createTestRepository(testRepository, FsRepository.TYPE, "{\"location\": \".\"}").isAcknowledged()); + assertTrue(createTestRepository("other", FsRepository.TYPE, "{\"location\": \".\"}").isAcknowledged()); + GetRepositoriesRequest request = new GetRepositoriesRequest(); + request.repositories(new String[]{testRepository}); GetRepositoriesResponse response = execute(request, highLevelClient().snapshot()::getRepositories, highLevelClient().snapshot()::getRepositoriesAsync); assertThat(1, equalTo(response.repositories().size())); } + public void testModulesGetDefaultRepositories() throws IOException { + assertTrue(createTestRepository("other", FsRepository.TYPE, "{\"location\": \".\"}").isAcknowledged()); + assertTrue(createTestRepository("test", FsRepository.TYPE, "{\"location\": \".\"}").isAcknowledged()); + + GetRepositoriesResponse response = execute(new GetRepositoriesRequest(), highLevelClient().snapshot()::getRepositories, + highLevelClient().snapshot()::getRepositoriesAsync); + assertThat(2, equalTo(response.repositories().size())); + } + public void testModulesGetRepositoriesNonExistent() throws IOException { String repository = "doesnotexist"; GetRepositoriesRequest request = new GetRepositoriesRequest(new String[]{repository}); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java index 2e7ea1650f424..d41b11c68fe44 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java @@ -19,8 +19,14 @@ package org.elasticsearch.client.documentation; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.LatchedActionListener; +import org.elasticsearch.action.TaskOperationFailure; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.elasticsearch.client.ESRestHighLevelClientTestCase; @@ -31,14 +37,20 @@ import org.elasticsearch.common.unit.TimeValue; import 
org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.indices.recovery.RecoverySettings; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.tasks.TaskInfo; import java.io.IOException; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import static java.util.Collections.emptyList; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.notNullValue; /** * This class is used to generate the Java Cluster API documentation. @@ -177,4 +189,87 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } } + + public void testListTasks() throws IOException { + RestHighLevelClient client = highLevelClient(); + { + // tag::list-tasks-request + ListTasksRequest request = new ListTasksRequest(); + // end::list-tasks-request + + // tag::list-tasks-request-filter + request.setActions("cluster:*"); // <1> + request.setNodes("nodeId1", "nodeId2"); // <2> + request.setParentTaskId(new TaskId("parentTaskId", 42)); // <3> + // end::list-tasks-request-filter + + // tag::list-tasks-request-detailed + request.setDetailed(true); // <1> + // end::list-tasks-request-detailed + + // tag::list-tasks-request-wait-completion + request.setWaitForCompletion(true); // <1> + request.setTimeout(TimeValue.timeValueSeconds(50)); // <2> + request.setTimeout("50s"); // <3> + // end::list-tasks-request-wait-completion + } + + ListTasksRequest request = new ListTasksRequest(); + + // tag::list-tasks-execute + ListTasksResponse response = client.cluster().listTasks(request); + // end::list-tasks-execute + + assertThat(response, notNullValue()); + + // tag::list-tasks-response-tasks + List tasks = response.getTasks(); // <1> + // end::list-tasks-response-tasks + + // tag::list-tasks-response-calc + Map> perNodeTasks = response.getPerNodeTasks(); // <1> + List groups = response.getTaskGroups(); // <2> + // end::list-tasks-response-calc + + // tag::list-tasks-response-failures + List nodeFailures = response.getNodeFailures(); // <1> + List taskFailures = response.getTaskFailures(); // <2> + // end::list-tasks-response-failures + + assertThat(response.getNodeFailures(), equalTo(emptyList())); + assertThat(response.getTaskFailures(), equalTo(emptyList())); + assertThat(response.getTasks().size(), greaterThanOrEqualTo(2)); + } + + public void testListTasksAsync() throws Exception { + RestHighLevelClient client = highLevelClient(); + { + ListTasksRequest request = new ListTasksRequest(); + + // tag::list-tasks-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(ListTasksResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::list-tasks-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::list-tasks-execute-async + client.cluster().listTasksAsync(request, listener); // <1> + // end::list-tasks-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java index 6fdc60fcb3394..463c5f7d12f5e 100644 
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SearchDocumentationIT.java @@ -41,7 +41,11 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.client.ESRestHighLevelClientTestCase; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestHighLevelClient; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.unit.TimeValue; @@ -60,6 +64,9 @@ import org.elasticsearch.index.rankeval.RatedRequest; import org.elasticsearch.index.rankeval.RatedSearchHit; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.script.mustache.SearchTemplateRequest; +import org.elasticsearch.script.mustache.SearchTemplateResponse; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; @@ -92,13 +99,13 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import static org.elasticsearch.index.query.QueryBuilders.matchQuery; -import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -707,9 +714,130 @@ public void onFailure(Exception e) { } } + public void testSearchTemplateWithInlineScript() throws Exception { + indexSearchTestData(); + RestHighLevelClient client = highLevelClient(); + + // tag::search-template-request-inline + SearchTemplateRequest request = new SearchTemplateRequest(); + request.setRequest(new SearchRequest("posts")); // <1> + + request.setScriptType(ScriptType.INLINE); + request.setScript( // <2> + "{" + + " \"query\": { \"match\" : { \"{{field}}\" : \"{{value}}\" } }," + + " \"size\" : \"{{size}}\"" + + "}"); + + Map scriptParams = new HashMap<>(); + scriptParams.put("field", "title"); + scriptParams.put("value", "elasticsearch"); + scriptParams.put("size", 5); + request.setScriptParams(scriptParams); // <3> + // end::search-template-request-inline + + // tag::search-template-response + SearchTemplateResponse response = client.searchTemplate(request); + SearchResponse searchResponse = response.getResponse(); + // end::search-template-response + + assertNotNull(searchResponse); + assertTrue(searchResponse.getHits().totalHits > 0); + + // tag::render-search-template-request + request.setSimulate(true); // <1> + // end::render-search-template-request + + // tag::render-search-template-response + SearchTemplateResponse renderResponse = client.searchTemplate(request); + BytesReference source = renderResponse.getSource(); // <1> + // end::render-search-template-response + + assertNotNull(source); + assertEquals(( + "{" + + " \"size\" : \"5\"," + + " \"query\": { \"match\" : { \"title\" : \"elasticsearch\" } }" + + "}").replaceAll("\\s+", ""), source.utf8ToString()); + } + + public void testSearchTemplateWithStoredScript() throws Exception { + indexSearchTestData(); + RestHighLevelClient client = 
highLevelClient(); + RestClient restClient = client(); + + // tag::register-script + Request scriptRequest = new Request("POST", "_scripts/title_search"); + scriptRequest.setJsonEntity( + "{" + + " \"script\": {" + + " \"lang\": \"mustache\"," + + " \"source\": {" + + " \"query\": { \"match\" : { \"{{field}}\" : \"{{value}}\" } }," + + " \"size\" : \"{{size}}\"" + + " }" + + " }" + + "}"); + Response scriptResponse = restClient.performRequest(scriptRequest); + // end::register-script + assertEquals(RestStatus.OK.getStatus(), scriptResponse.getStatusLine().getStatusCode()); + + // tag::search-template-request-stored + SearchTemplateRequest request = new SearchTemplateRequest(); + request.setRequest(new SearchRequest("posts")); + + request.setScriptType(ScriptType.STORED); + request.setScript("title_search"); + + Map params = new HashMap<>(); + params.put("field", "title"); + params.put("value", "elasticsearch"); + params.put("size", 5); + request.setScriptParams(params); + // end::search-template-request-stored + + // tag::search-template-request-options + request.setExplain(true); + request.setProfile(true); + // end::search-template-request-options + + // tag::search-template-execute + SearchTemplateResponse response = client.searchTemplate(request); + // end::search-template-execute + + SearchResponse searchResponse = response.getResponse(); + assertNotNull(searchResponse); + assertTrue(searchResponse.getHits().totalHits > 0); + + // tag::search-template-execute-listener + ActionListener listener = new ActionListener() { + @Override + public void onResponse(SearchTemplateResponse response) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::search-template-execute-listener + + // Replace the empty listener by a blocking listener for tests. 
+ CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::search-template-execute-async + client.searchTemplateAsync(request, listener); // <1> + // end::search-template-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + public void testFieldCaps() throws Exception { indexSearchTestData(); RestHighLevelClient client = highLevelClient(); + // tag::field-caps-request FieldCapabilitiesRequest request = new FieldCapabilitiesRequest() .fields("user") @@ -725,22 +853,26 @@ public void testFieldCaps() throws Exception { // end::field-caps-execute // tag::field-caps-response - assertThat(response.get().keySet(), contains("user")); - Map userResponse = response.getField("user"); - - assertThat(userResponse.keySet(), containsInAnyOrder("keyword", "text")); // <1> + Map userResponse = response.getField("user"); // <1> FieldCapabilities textCapabilities = userResponse.get("keyword"); - assertTrue(textCapabilities.isSearchable()); - assertFalse(textCapabilities.isAggregatable()); + boolean isSearchable = textCapabilities.isSearchable(); + boolean isAggregatable = textCapabilities.isAggregatable(); - assertArrayEquals(textCapabilities.indices(), // <2> - new String[]{"authors", "contributors"}); - assertNull(textCapabilities.nonSearchableIndices()); // <3> - assertArrayEquals(textCapabilities.nonAggregatableIndices(), // <4> - new String[]{"authors"}); + String[] indices = textCapabilities.indices(); // <2> + String[] nonSearchableIndices = textCapabilities.nonSearchableIndices(); // <3> + String[] nonAggregatableIndices = textCapabilities.nonAggregatableIndices();//<4> // end::field-caps-response + assertThat(userResponse.keySet(), containsInAnyOrder("keyword", "text")); + + assertTrue(isSearchable); + assertFalse(isAggregatable); + + assertArrayEquals(indices, new String[]{"authors", "contributors"}); + assertNull(nonSearchableIndices); + assertArrayEquals(nonAggregatableIndices, new String[]{"authors"}); + // tag::field-caps-execute-listener ActionListener listener = new ActionListener() { @Override diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java index 1044cc9da3332..c57f8e2a2fbd5 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java @@ -19,20 +19,24 @@ package org.elasticsearch.client.documentation; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.cluster.metadata.RepositoryMetaData; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import 
org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.repositories.fs.FsRepository; import java.io.IOException; -import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -58,7 +62,114 @@ */ public class SnapshotClientDocumentationIT extends ESRestHighLevelClientTestCase { - private static final String testRepository = "test_repository"; + private static final String repositoryName = "test_repository"; + + public void testSnapshotCreateRepository() throws IOException { + RestHighLevelClient client = highLevelClient(); + + // tag::create-repository-request + PutRepositoryRequest request = new PutRepositoryRequest(); + // end::create-repository-request + + // tag::create-repository-create-settings + String locationKey = FsRepository.LOCATION_SETTING.getKey(); + String locationValue = "."; + String compressKey = FsRepository.COMPRESS_SETTING.getKey(); + boolean compressValue = true; + + Settings settings = Settings.builder() + .put(locationKey, locationValue) + .put(compressKey, compressValue) + .build(); // <1> + // end::create-repository-create-settings + + // tag::create-repository-request-repository-settings + request.settings(settings); // <1> + // end::create-repository-request-repository-settings + + { + // tag::create-repository-settings-builder + Settings.Builder settingsBuilder = Settings.builder() + .put(locationKey, locationValue) + .put(compressKey, compressValue); + request.settings(settingsBuilder); // <1> + // end::create-repository-settings-builder + } + { + // tag::create-repository-settings-map + Map map = new HashMap<>(); + map.put(locationKey, locationValue); + map.put(compressKey, compressValue); + request.settings(map); // <1> + // end::create-repository-settings-map + } + { + // tag::create-repository-settings-source + request.settings("{\"location\": \".\", \"compress\": \"true\"}", + XContentType.JSON); // <1> + // end::create-repository-settings-source + } + + // tag::create-repository-request-name + request.name(repositoryName); // <1> + // end::create-repository-request-name + // tag::create-repository-request-type + request.type(FsRepository.TYPE); // <1> + // end::create-repository-request-type + + // tag::create-repository-request-masterTimeout + request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.masterNodeTimeout("1m"); // <2> + // end::create-repository-request-masterTimeout + // tag::create-repository-request-timeout + request.timeout(TimeValue.timeValueMinutes(1)); // <1> + request.timeout("1m"); // <2> + // end::create-repository-request-timeout + // tag::create-repository-request-verify + request.verify(true); // <1> + // end::create-repository-request-verify + + // tag::create-repository-execute + PutRepositoryResponse response = client.snapshot().createRepository(request); + // end::create-repository-execute + + // tag::create-repository-response + boolean acknowledged = response.isAcknowledged(); // <1> + // end::create-repository-response + assertTrue(acknowledged); + } + + public void testSnapshotCreateRepositoryAsync() throws InterruptedException { + RestHighLevelClient client = highLevelClient(); + { + PutRepositoryRequest request = new PutRepositoryRequest(repositoryName); + + // tag::create-repository-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(PutRepositoryResponse putRepositoryResponse) { + // <1> + } + + 
@Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::create-repository-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::create-repository-execute-async + client.snapshot().createRepositoryAsync(request, listener); // <1> + // end::create-repository-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } public void testSnapshotGetRepository() throws IOException { RestHighLevelClient client = highLevelClient(); @@ -70,7 +181,7 @@ public void testSnapshotGetRepository() throws IOException { // end::get-repository-request // tag::get-repository-request-repositories - String [] repositories = new String[] { testRepository }; + String [] repositories = new String[] {repositoryName}; request.repositories(repositories); // <1> // end::get-repository-request-repositories // tag::get-repository-request-local @@ -89,7 +200,7 @@ public void testSnapshotGetRepository() throws IOException { List repositoryMetaDataResponse = response.repositories(); // end::get-repository-response assertThat(1, equalTo(repositoryMetaDataResponse.size())); - assertThat(testRepository, equalTo(repositoryMetaDataResponse.get(0).name())); + assertThat(repositoryName, equalTo(repositoryMetaDataResponse.get(0).name())); } public void testSnapshotGetRepositoryAsync() throws InterruptedException { @@ -122,14 +233,12 @@ public void onFailure(Exception e) { assertTrue(latch.await(30L, TimeUnit.SECONDS)); } - } private void createTestRepositories() throws IOException { - RestHighLevelClient client = highLevelClient(); - String repositorySettings = "{\"type\":\"fs\", \"settings\":{\"location\": \".\"}}"; - highLevelClient().getLowLevelClient().performRequest("put", "_snapshot/" + testRepository, Collections.emptyMap(), - new StringEntity(repositorySettings, ContentType.APPLICATION_JSON)); - + PutRepositoryRequest request = new PutRepositoryRequest(repositoryName); + request.type(FsRepository.TYPE); + request.settings("{\"location\": \".\"}", XContentType.JSON); + assertTrue(highLevelClient().snapshot().createRepository(request).isAcknowledged()); } } diff --git a/client/rest/src/main/java/org/elasticsearch/client/PersistentCredentialsAuthenticationStrategy.java b/client/rest/src/main/java/org/elasticsearch/client/PersistentCredentialsAuthenticationStrategy.java new file mode 100644 index 0000000000000..4ae22fbe3728e --- /dev/null +++ b/client/rest/src/main/java/org/elasticsearch/client/PersistentCredentialsAuthenticationStrategy.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ * + * + */ + +package org.elasticsearch.client; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.http.HttpHost; +import org.apache.http.auth.AuthScheme; +import org.apache.http.impl.client.TargetAuthenticationStrategy; +import org.apache.http.protocol.HttpContext; + +/** + * An {@link org.apache.http.client.AuthenticationStrategy} implementation that does not perform + * any special handling if authentication fails. + * The default handler in Apache HTTP client mimics standard browser behaviour of clearing authentication + * credentials if it receives a 401 response from the server. While this can be useful for browsers, it is + * rarely the desired behaviour with the Elasticsearch REST API. + * If the code using the REST client has configured credentials for the REST API, then we can and should + * assume that this is intentional, and those credentials represent the best possible authentication + * mechanism to the Elasticsearch node. + * If we receive a 401 status, a probable cause is that the authentication mechanism in place was unable + * to perform the requisite password checks (the node has not yet recovered its state, or an external + * authentication provider was unavailable). + * If this occurs, then the desired behaviour is for the REST client to retry with the same credentials + * (rather than trying with no credentials, or expecting the calling code to provide alternate credentials). + */ +final class PersistentCredentialsAuthenticationStrategy extends TargetAuthenticationStrategy { + + private final Log logger = LogFactory.getLog(PersistentCredentialsAuthenticationStrategy.class); + + @Override + public void authFailed(HttpHost host, AuthScheme authScheme, HttpContext context) { + if (logger.isDebugEnabled()) { + logger.debug("Authentication to " + host + " failed (scheme: " + authScheme.getSchemeName() + + "). Preserving credentials for next request"); + } + // Do nothing. 
+ // The superclass implementation of this method would clear the credentials from the cache, but we don't want that here. + } +} diff --git a/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java b/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java index 8768c07161989..5f7831c67fc28 100644 --- a/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java +++ b/client/rest/src/main/java/org/elasticsearch/client/RestClientBuilder.java @@ -204,7 +204,8 @@ private CloseableHttpAsyncClient createHttpClient() { HttpAsyncClientBuilder httpClientBuilder = HttpAsyncClientBuilder.create().setDefaultRequestConfig(requestConfigBuilder.build()) //default settings for connection pooling may be too constraining .setMaxConnPerRoute(DEFAULT_MAX_CONN_PER_ROUTE).setMaxConnTotal(DEFAULT_MAX_CONN_TOTAL) - .setSSLContext(SSLContext.getDefault()); + .setSSLContext(SSLContext.getDefault()) + .setTargetAuthenticationStrategy(new PersistentCredentialsAuthenticationStrategy()); if (httpClientConfigCallback != null) { httpClientBuilder = httpClientConfigCallback.customizeHttpClient(httpClientBuilder); } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RequestTests.java b/client/rest/src/test/java/org/elasticsearch/client/RequestTests.java index b83115a5341dd..6625c389c6be8 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RequestTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RequestTests.java @@ -26,13 +26,16 @@ import org.apache.http.Header; import org.apache.http.HttpEntity; +import org.apache.http.entity.ByteArrayEntity; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; import org.apache.http.message.BasicHeader; import org.apache.http.nio.entity.NStringEntity; +import org.elasticsearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.fail; @@ -151,6 +154,103 @@ public void testSetHeaders() { assertArrayEquals(headers, request.getHeaders()); } - // TODO equals and hashcode + public void testEqualsAndHashCode() { + Request request = randomRequest(); + assertEquals(request, request); + Request copy = copy(request); + assertEquals(request, copy); + assertEquals(copy, request); + assertEquals(request.hashCode(), copy.hashCode()); + + Request mutant = mutate(request); + assertNotEquals(request, mutant); + assertNotEquals(mutant, request); + } + + private Request randomRequest() { + Request request = new Request( + randomFrom(new String[] {"GET", "PUT", "DELETE", "POST", "HEAD", "OPTIONS"}), + randomAsciiAlphanumOfLength(5)); + + int parameterCount = between(0, 5); + for (int i = 0; i < parameterCount; i++) { + request.addParameter(randomAsciiAlphanumOfLength(i), randomAsciiLettersOfLength(3)); + } + + if (randomBoolean()) { + if (randomBoolean()) { + request.setJsonEntity(randomAsciiAlphanumOfLength(10)); + } else { + request.setEntity(randomFrom(new HttpEntity[] { + new StringEntity(randomAsciiAlphanumOfLength(10), ContentType.APPLICATION_JSON), + new NStringEntity(randomAsciiAlphanumOfLength(10), ContentType.APPLICATION_JSON), + new ByteArrayEntity(randomBytesOfLength(40), ContentType.APPLICATION_JSON) + })); + } + } + + if (randomBoolean()) { + int headerCount = between(1, 5); + Header[] headers = new Header[headerCount]; + for (int i = 0; i < 
headerCount; i++) { + headers[i] = new BasicHeader(randomAsciiAlphanumOfLength(3), randomAsciiAlphanumOfLength(3)); + } + request.setHeaders(headers); + } + + if (randomBoolean()) { + request.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(1)); + } + + return request; + } + + private Request copy(Request request) { + Request copy = new Request(request.getMethod(), request.getEndpoint()); + copyMutables(request, copy); + return copy; + } + + private Request mutate(Request request) { + if (randomBoolean()) { + // Mutate request or method but keep everything else constant + Request mutant = randomBoolean() + ? new Request(request.getMethod() + "m", request.getEndpoint()) + : new Request(request.getMethod(), request.getEndpoint() + "m"); + copyMutables(request, mutant); + return mutant; + } + Request mutant = copy(request); + int mutationType = between(0, 3); + switch (mutationType) { + case 0: + mutant.addParameter(randomAsciiAlphanumOfLength(mutant.getParameters().size() + 4), "extra"); + return mutant; + case 1: + mutant.setJsonEntity("mutant"); // randomRequest can't produce this value + return mutant; + case 2: + if (mutant.getHeaders().length > 0) { + mutant.setHeaders(new Header[0]); + } else { + mutant.setHeaders(new BasicHeader("extra", "m")); + } + return mutant; + case 3: + mutant.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(5)); + return mutant; + default: + throw new UnsupportedOperationException("Unknown mutation type [" + mutationType + "]"); + } + } + + private void copyMutables(Request from, Request to) { + for (Map.Entry param : from.getParameters().entrySet()) { + to.addParameter(param.getKey(), param.getValue()); + } + to.setEntity(from.getEntity()); + to.setHeaders(from.getHeaders()); + to.setHttpAsyncResponseConsumerFactory(from.getHttpAsyncResponseConsumerFactory()); + } } diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java index 667e38a5167d7..35cac627bbe6a 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientSingleHostIntegTests.java @@ -31,14 +31,14 @@ import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; import org.apache.http.impl.client.BasicCredentialsProvider; +import org.apache.http.impl.client.TargetAuthenticationStrategy; import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; +import org.apache.http.message.BasicHeader; import org.apache.http.nio.entity.NStringEntity; import org.apache.http.util.EntityUtils; import org.elasticsearch.mocksocket.MockHttpServer; import org.junit.After; -import org.junit.AfterClass; import org.junit.Before; -import org.junit.BeforeClass; import java.io.IOException; import java.io.InputStreamReader; @@ -147,6 +147,8 @@ public HttpAsyncClientBuilder customizeHttpClient(final HttpAsyncClientBuilder h if (usePreemptiveAuth == false) { // disable preemptive auth by ignoring any authcache httpClientBuilder.disableAuthCaching(); + // don't use the "persistent credentials strategy" + httpClientBuilder.setTargetAuthenticationStrategy(new TargetAuthenticationStrategy()); } return httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider); @@ -193,7 +195,7 @@ public void onFailure(Exception exception) { assertTrue("timeout waiting for requests to be sent", latch.await(10, 
TimeUnit.SECONDS)); if (exceptions.isEmpty() == false) { AssertionError error = new AssertionError("expected no failures but got some. see suppressed for first 10 of [" - + exceptions.size() + "] failures"); + + exceptions.size() + "] failures"); for (Exception exception : exceptions.subList(0, Math.min(10, exceptions.size()))) { error.addSuppressed(exception); } @@ -217,7 +219,7 @@ public void testHeaders() throws IOException { Response esResponse; try { esResponse = restClient.performRequest(method, "/" + statusCode, Collections.emptyMap(), requestHeaders); - } catch(ResponseException e) { + } catch (ResponseException e) { esResponse = e.getResponse(); } @@ -291,8 +293,8 @@ public void testEncodeParams() throws IOException { /** * Verify that credentials are sent on the first request with preemptive auth enabled (default when provided with credentials). */ - public void testPreemptiveAuthEnabled() throws IOException { - final String[] methods = { "POST", "PUT", "GET", "DELETE" }; + public void testPreemptiveAuthEnabled() throws IOException { + final String[] methods = {"POST", "PUT", "GET", "DELETE"}; try (RestClient restClient = createRestClient(true, true)) { for (final String method : methods) { @@ -306,8 +308,8 @@ public void testPreemptiveAuthEnabled() throws IOException { /** * Verify that credentials are not sent on the first request with preemptive auth disabled. */ - public void testPreemptiveAuthDisabled() throws IOException { - final String[] methods = { "POST", "PUT", "GET", "DELETE" }; + public void testPreemptiveAuthDisabled() throws IOException { + final String[] methods = {"POST", "PUT", "GET", "DELETE"}; try (RestClient restClient = createRestClient(true, false)) { for (final String method : methods) { @@ -318,12 +320,31 @@ public void testPreemptiveAuthDisabled() throws IOException { } } + /** + * Verify that credentials continue to be sent even if a 401 (Unauthorized) response is received + */ + public void testAuthCredentialsAreNotClearedOnAuthChallenge() throws IOException { + final String[] methods = {"POST", "PUT", "GET", "DELETE"}; + + try (RestClient restClient = createRestClient(true, true)) { + for (final String method : methods) { + Header realmHeader = new BasicHeader("WWW-Authenticate", "Basic realm=\"test\""); + final Response response401 = bodyTest(restClient, method, 401, new Header[]{realmHeader}); + assertThat(response401.getHeader("Authorization"), startsWith("Basic")); + + final Response response200 = bodyTest(restClient, method, 200, new Header[0]); + assertThat(response200.getHeader("Authorization"), startsWith("Basic")); + } + } + + } + public void testUrlWithoutLeadingSlash() throws Exception { if (pathPrefix.length() == 0) { try { restClient.performRequest("GET", "200"); fail("request should have failed"); - } catch(ResponseException e) { + } catch (ResponseException e) { assertEquals(404, e.getResponse().getStatusLine().getStatusCode()); } } else { @@ -335,8 +356,8 @@ public void testUrlWithoutLeadingSlash() throws Exception { { //pathPrefix is not required to start with '/', will be added automatically try (RestClient restClient = RestClient.builder( - new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort())) - .setPathPrefix(pathPrefix.substring(1)).build()) { + new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort())) + .setPathPrefix(pathPrefix.substring(1)).build()) { Response response = restClient.performRequest("GET", "200"); //a trailing slash gets automatically added if a 
pathPrefix is configured assertEquals(200, response.getStatusLine().getStatusCode()); @@ -350,10 +371,15 @@ private Response bodyTest(final String method) throws IOException { } private Response bodyTest(final RestClient restClient, final String method) throws IOException { - String requestBody = "{ \"field\": \"value\" }"; int statusCode = randomStatusCode(getRandom()); + return bodyTest(restClient, method, statusCode, new Header[0]); + } + + private Response bodyTest(RestClient restClient, String method, int statusCode, Header[] headers) throws IOException { + String requestBody = "{ \"field\": \"value\" }"; Request request = new Request(method, "/" + statusCode); request.setJsonEntity(requestBody); + request.setHeaders(headers); Response esResponse; try { esResponse = restClient.performRequest(request); diff --git a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java index 872b327954b02..ea124828e45eb 100644 --- a/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -96,7 +96,7 @@ public void onFailure(Exception exception) { } /** - * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testSetParameters()}. + * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testAddParameters()}. */ @Deprecated public void testPerformOldStyleAsyncWithNullParams() throws Exception { diff --git a/distribution/archives/build.gradle b/distribution/archives/build.gradle index 9fa06021236a2..5d1703399aad4 100644 --- a/distribution/archives/build.gradle +++ b/distribution/archives/build.gradle @@ -224,7 +224,7 @@ subprojects { doLast { // this is just a small sample from the C++ notices, the idea being that if we've added these lines we've probably added all the required lines final List expectedLines = Arrays.asList("Apache log4cxx", "Boost Software License - Version 1.0 - August 17th, 2003") - final Path noticePath = archiveExtractionDir.toPath().resolve("elasticsearch-${VersionProperties.elasticsearch}/modules/x-pack/x-pack-ml/NOTICE.txt") + final Path noticePath = archiveExtractionDir.toPath().resolve("elasticsearch-${VersionProperties.elasticsearch}/modules/x-pack-ml/NOTICE.txt") final List actualLines = Files.readAllLines(noticePath) for (final String expectedLine : expectedLines) { if (actualLines.contains(expectedLine) == false) { diff --git a/distribution/build.gradle b/distribution/build.gradle index 266cb8f8b270a..d2e2810bc7eec 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -201,17 +201,14 @@ project.rootProject.subprojects.findAll { it.parent.path == ':modules' }.each { // use licenses from each of the bundled xpack plugins Project xpack = project(':x-pack:plugin') -xpack.subprojects.findAll { it.name != 'bwc' }.each { Project xpackSubproject -> - File licenses = new File(xpackSubproject.projectDir, 'licenses') +xpack.subprojects.findAll { it.parent == xpack }.each { Project xpackModule -> + File licenses = new File(xpackModule.projectDir, 'licenses') if (licenses.exists()) { buildDefaultNotice.licensesDir licenses } + copyModule(processDefaultOutputs, xpackModule) + copyLog4jProperties(buildDefaultLog4jConfig, xpackModule) } -// but copy just the top level meta plugin to the default modules -copyModule(processDefaultOutputs, xpack) -copyLog4jProperties(buildDefaultLog4jConfig, xpack) - 
-// // make sure we have a clean task since we aren't a java project, but we have tasks that // put stuff in the build dir diff --git a/docs/CHANGELOG.asciidoc b/docs/CHANGELOG.asciidoc deleted file mode 100644 index 6eb26fde8f9f8..0000000000000 --- a/docs/CHANGELOG.asciidoc +++ /dev/null @@ -1,257 +0,0 @@ -[[es-release-notes]] -= {es} Release Notes - -[partintro] --- -// To add a release, copy and paste the template text -// and add a link to the new section. Note that release subheads must -// be floated and sections cannot be empty. - -// Use these for links to issue and pulls. Note issues and pulls redirect one to -// each other on Github, so don't worry too much on using the right prefix. -:issue: https://github.com/elastic/elasticsearch/issues/ -:pull: https://github.com/elastic/elasticsearch/pull/ - -This section summarizes the changes in each release. - -* <> -* <> -* <> - --- - -//// -// To add a release, copy and paste the following text, uncomment the relevant -// sections, and add a link to the new section in the list of releases at the -// top of the page. Note that release subheads must be floated and sections -// cannot be empty. -// TEMPLATE: - -// [[release-notes-n.n.n]] -// == {es} n.n.n - -//[float] -[[breaking-n.n.n]] -//=== Breaking Changes - -//[float] -//=== Breaking Java Changes - -//[float] -//=== Deprecations - -//[float] -//=== New Features - -//[float] -//=== Enhancements - -//[float] -//=== Bug Fixes - -//[float] -//=== Regressions - -//[float] -//=== Known Issues - -//// - -[[release-notes-7.0.0]] -== {es} 7.0.0 - -coming[7.0.0] - -[float] -[[breaking-7.0.0]] -=== Breaking Changes - -<> ({pull}29609[#29609]) - -<> ({pull}29004[#29004]) -<> ({pull}29635[#29635]) - -<> ({pull}30185[#30185]) - -Machine Learning:: -* The `max_running_jobs` node property is removed in this release. Use the -`xpack.ml.max_open_jobs` setting instead. For more information, see <>. - -* <> ({pull}29601[#29601]) - -//[float] -//=== Breaking Java Changes - -[float] -=== Deprecations -Monitoring:: -* The `xpack.monitoring.collection.interval` setting can no longer be set to `-1` -to disable monitoring data collection. Use `xpack.monitoring.collection.enabled` -and set it to `false` (its default), which was added in 6.3.0. - -Security:: -* The fields returned as part of the mappings section by get index, get -mappings, get field mappings, and field capabilities API are now only the -ones that the user is authorized to access in case field level security is enabled. - -//[float] -//=== New Features - -//[float] -//=== Enhancements - -[float] -=== Bug Fixes - -Use date format in `date_range` mapping before fallback to default ({pull}29310[#29310]) - -Fix NPE in 'more_like_this' when field has zero tokens ({pull}30365[#30365]) - -Fixed prerelease version of elasticsearch in the `deb` package to sort before GA versions -({pull}29000[#29000]) - -Rollup:: -* Validate timezone in range queries to ensure they match the selected job when -searching ({pull}30338[#30338]) - -SQL:: -* Fix parsing of Dates containing milliseconds ({pull}30419[#30419]) - -[float] -=== Regressions -Fail snapshot operations early when creating or deleting a snapshot on a repository that has been -written to by an older Elasticsearch after writing to it with a newer Elasticsearch version. 
({pull}30140[#30140]) - -Fix NPE when CumulativeSum agg encounters null value/empty bucket ({pull}29641[#29641]) -Do not fail snapshot when deleting a missing snapshotted file ({pull}30332[#30332]) - -//[float] -//=== Regressions - -//[float] -//=== Known Issues - -[[release-notes-6.4.0]] -== {es} 6.4.0 - -coming[6.4.0] - -//[float] -[[breaking-6.4.0]] -//=== Breaking Changes - -//[float] -//=== Breaking Java Changes - -[float] -=== Deprecations - -Deprecated multi-argument versions of the request methods in the RestClient. -Prefer the "Request" object flavored methods. ({pull}30315[#30315]) - -[float] -=== New Features - -The new <> field allows to know which fields -got ignored at index time because of the <> -option. ({pull}30140[#29658]) - -A new analysis plugin called `analysis_nori` that exposes the Lucene Korean -analysis module. ({pull}30397[#30397]) - -[float] -=== Enhancements - -{ref-64}/breaking_64_api_changes.html#copy-source-settings-on-resize[Allow -copying source settings on index resize operations] ({pull}30255[#30255], {pull}30404[#30404]) - -Added new "Request" object flavored request methods in the RestClient. Prefer -these instead of the multi-argument versions. ({pull}29623[#29623]) - -Added `setJsonEntity` to `Request` object so it is marginally easier to send JSON. ({pull}30447[#30447]) -Watcher HTTP client used in watches now allows more parallel connections to the -same endpoint and evicts long running connections. ({pull}30130[#30130]) - -The cluster state listener to decide if watcher should be -stopped/started/paused now runs far less code in an executor but is more -synchronous and predictable. Also the trigger engine thread is only started on -data nodes. And the Execute Watch API can be triggered regardless is watcher is -started or stopped. ({pull}30118[#30118]) - -Added put index template API to the high level rest client ({pull}30400[#30400]) - -Add ability to filter coordinating-only nodes when interacting with cluster -APIs. 
({pull}30313[#30313]) - -[float] -=== Bug Fixes - -Use date format in `date_range` mapping before fallback to default ({pull}29310[#29310]) - -Fix NPE in 'more_like_this' when field has zero tokens ({pull}30365[#30365]) - -Do not ignore request analysis/similarity settings on index resize operations when the source index already contains such settings ({pull}30216[#30216]) - -Fix NPE when CumulativeSum agg encounters null value/empty bucket ({pull}29641[#29641]) - -Machine Learning:: - -* Account for gaps in data counts after job is reopened ({pull}30294[#30294]) - -Add validation that geohashes are not empty and don't contain unsupported characters ({pull}30376[#30376]) - -Rollup:: -* Validate timezone in range queries to ensure they match the selected job when -searching ({pull}30338[#30338]) - -SQL:: -* Fix parsing of Dates containing milliseconds ({pull}30419[#30419]) - -Allocation:: - -Auto-expand replicas when adding or removing nodes to prevent shard copies from -being dropped and resynced when a data node rejoins the cluster ({pull}30423[#30423]) - -//[float] -//=== Regressions - -//[float] -//=== Known Issues - -[[release-notes-6.3.1]] -== Elasticsearch version 6.3.1 - -coming[6.3.1] - -//[float] -[[breaking-6.3.1]] -//=== Breaking Changes - -//[float] -//=== Breaking Java Changes - -//[float] -//=== Deprecations - -//[float] -//=== New Features - -//[float] -//=== Enhancements - -[float] -=== Bug Fixes - -Reduce the number of object allocations made by {security} when resolving the indices and aliases for a request ({pull}30180[#30180]) - -Respect accept header on requests with no handler ({pull}30383[#30383]) - -SQL:: -* Fix parsing of Dates containing milliseconds ({pull}30419[#30419]) - -//[float] -//=== Regressions - -//[float] -//=== Known Issues diff --git a/docs/java-api/query-dsl/has-child-query.asciidoc b/docs/java-api/query-dsl/has-child-query.asciidoc index 300b32e1922b0..f47f3af487dfe 100644 --- a/docs/java-api/query-dsl/has-child-query.asciidoc +++ b/docs/java-api/query-dsl/has-child-query.asciidoc @@ -9,7 +9,7 @@ When using the `has_child` query it is important to use the `PreBuiltTransportCl -------------------------------------------------- Settings settings = Settings.builder().put("cluster.name", "elasticsearch").build(); TransportClient client = new PreBuiltTransportClient(settings); -client.addTransportAddress(new InetSocketTransportAddress(new InetSocketAddress(InetAddresses.forString("127.0.0.1"), 9300))); +client.addTransportAddress(new TransportAddress(new InetSocketAddress(InetAddresses.forString("127.0.0.1"), 9300))); -------------------------------------------------- Otherwise the parent-join module doesn't get loaded and the `has_child` query can't be used from the transport client. diff --git a/docs/java-rest/high-level/cluster/list_tasks.asciidoc b/docs/java-rest/high-level/cluster/list_tasks.asciidoc new file mode 100644 index 0000000000000..1a2117b2e66e6 --- /dev/null +++ b/docs/java-rest/high-level/cluster/list_tasks.asciidoc @@ -0,0 +1,101 @@ +[[java-rest-high-cluster-list-tasks]] +=== List Tasks API + +The List Tasks API allows to get information about the tasks currently executing in the cluster. 
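As a rough orientation before the sections below, a request with the filters described on this page might be assembled like this. This is a sketch only: the setter names mirror the parameters discussed below (`setWaitForCompletion` is named in the timeout notes), but the exact method names are assumptions; the tagged snippets that follow are the canonical examples.

[source,java]
--------------------------------------------------
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;

// Sketch: list only cluster-level tasks, ask for detailed output, don't block.
ListTasksRequest request = new ListTasksRequest();
request.setActions("cluster:*");       // only cluster-related tasks (assumed setter name)
request.setDetailed(true);             // include detailed, potentially slow to generate data
request.setWaitForCompletion(false);   // default: do not wait for the tasks to complete
--------------------------------------------------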
+ +[[java-rest-high-cluster-list-tasks-request]] +==== List Tasks Request + +A `ListTasksRequest`: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-request] +-------------------------------------------------- +There is no required parameters. By default the client will list all tasks and will not wait +for task completion. + +==== Parameters + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-request-filter] +-------------------------------------------------- +<1> Request only cluster-related tasks +<2> Request all tasks running on nodes nodeId1 and nodeId2 +<3> Request only children of a particular task + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-request-detailed] +-------------------------------------------------- +<1> Should the information include detailed, potentially slow to generate data. Defaults to `false` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-request-wait-completion] +-------------------------------------------------- +<1> Should this request wait for all found tasks to complete. Defaults to `false` +<2> Timeout for the request as a `TimeValue`. Applicable only if `setWaitForCompletion` is `true`. +Defaults to 30 seconds +<3> Timeout as a `String` + +[[java-rest-high-cluster-list-tasks-sync]] +==== Synchronous Execution + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-execute] +-------------------------------------------------- + +[[java-rest-high-cluster-list-tasks-async]] +==== Asynchronous Execution + +The asynchronous execution of a cluster update settings requires both the +`ListTasksRequest` instance and an `ActionListener` instance to be +passed to the asynchronous method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-execute-async] +-------------------------------------------------- +<1> The `ListTasksRequest` to execute and the `ActionListener` to use +when the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `ListTasksResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of a failure. 
The raised exception is provided as an argument + +[[java-rest-high-cluster-list-tasks-response]] +==== List Tasks Response + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-response-tasks] +-------------------------------------------------- +<1> List of currently running tasks + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-response-calc] +-------------------------------------------------- +<1> List of tasks grouped by a node +<2> List of tasks grouped by a parent task + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-response-failures] +-------------------------------------------------- +<1> List of node failures +<2> List of tasks failures diff --git a/docs/java-rest/high-level/search/field-caps.asciidoc b/docs/java-rest/high-level/search/field-caps.asciidoc index fef30f629ca61..1f5b10ad034df 100644 --- a/docs/java-rest/high-level/search/field-caps.asciidoc +++ b/docs/java-rest/high-level/search/field-caps.asciidoc @@ -76,7 +76,7 @@ information about how each index contributes to the field's capabilities. -------------------------------------------------- include-tagged::{doc-tests}/SearchDocumentationIT.java[field-caps-response] -------------------------------------------------- -<1> The `user` field has two possible types, `keyword` and `text`. -<2> This field only has type `keyword` in the `authors` and `contributors` indices. -<3> Null, since the field is searchable in all indices for which it has the `keyword` type. -<4> The `user` field is not aggregatable in the `authors` index. \ No newline at end of file +<1> A map with entries for the field's possible types, in this case `keyword` and `text`. +<2> All indices where the `user` field has type `keyword`. +<3> The subset of these indices where the `user` field isn't searchable, or null if it's always searchable. +<4> Another subset of these indices where the `user` field isn't aggregatable, or null if it's always aggregatable. \ No newline at end of file diff --git a/docs/java-rest/high-level/search/search-template.asciidoc b/docs/java-rest/high-level/search/search-template.asciidoc new file mode 100644 index 0000000000000..3f0dfb8ab28e0 --- /dev/null +++ b/docs/java-rest/high-level/search/search-template.asciidoc @@ -0,0 +1,117 @@ +[[java-rest-high-search-template]] +=== Search Template API + +The search template API allows for searches to be executed from a template based +on the mustache language, and also for previewing rendered templates. + +[[java-rest-high-search-template-request]] +==== Search Template Request + +===== Inline Templates + +In the most basic form of request, the search template is specified inline: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[search-template-request-inline] +-------------------------------------------------- +<1> The search is executed against the `posts` index. +<2> The template defines the structure of the search source. It is passed +as a string because mustache templates are not always valid JSON. +<3> Before running the search, the template is rendered with the provided parameters. 
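For readers without the documentation tests at hand, the inline case above corresponds roughly to the following sketch. The index name, template body, and parameter values are illustrative, and the `SearchTemplateRequest` setters are assumed to match the ones used in the tagged snippet.

[source,java]
--------------------------------------------------
import java.util.HashMap;
import java.util.Map;

import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.script.mustache.SearchTemplateRequest;

// Sketch of an inline search template request.
SearchTemplateRequest request = new SearchTemplateRequest();
request.setRequest(new SearchRequest("posts"));   // the search is executed against the posts index
request.setScriptType(ScriptType.INLINE);         // the template is provided inline
request.setScript(
    "{ \"query\": { \"match\": { \"{{field}}\": \"{{value}}\" } }, \"size\": \"{{size}}\" }");

Map<String, Object> params = new HashMap<>();     // parameters used to render the template
params.put("field", "title");
params.put("value", "elasticsearch");
params.put("size", 5);
request.setScriptParams(params);
--------------------------------------------------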
+ +===== Registered Templates + +Search templates can be registered in advance through stored scripts API. Note that +the stored scripts API is not yet available in the high-level REST client, so in this +example we use the low-level REST client. + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[register-script] +-------------------------------------------------- + +Instead of providing an inline script, we can refer to this registered template in the request: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[search-template-request-stored] +-------------------------------------------------- + +===== Rendering Templates + +Given parameter values, a template can be rendered without executing a search: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[render-search-template-request] +-------------------------------------------------- +<1> Setting `simulate` to `true` causes the search template to only be rendered. + +Both inline and pre-registered templates can be rendered. + +===== Optional Arguments + +As in standard search requests, the `explain` and `profile` options are supported: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[search-template-request-options] +-------------------------------------------------- + +===== Additional References + +The {ref}/search-template.html[Search Template documentation] contains further examples of how search requests can be templated. + +[[java-rest-high-search-template-sync]] +==== Synchronous Execution + +The `searchTemplate` method executes the request synchronously: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[search-template-execute] +-------------------------------------------------- + +==== Asynchronous Execution + +A search template request can be executed asynchronously through the `searchTemplateAsync` +method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[search-template-execute-async] +-------------------------------------------------- +<1> The `SearchTemplateRequest` to execute and the `ActionListener` to call when the execution completes. + +The asynchronous method does not block and returns immediately. Once the request completes, the +`ActionListener` is called back using the `onResponse` method if the execution completed successfully, +or using the `onFailure` method if it failed. + +A typical listener for `SearchTemplateResponse` is constructed as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[search-template-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. +<2> Called when the whole `SearchTemplateRequest` fails. 
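Spelled out without the include tags, such a listener is simply an anonymous `ActionListener` with the two callbacks; this sketch mirrors the snippet above.

[source,java]
--------------------------------------------------
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.script.mustache.SearchTemplateResponse;

ActionListener<SearchTemplateResponse> listener = new ActionListener<SearchTemplateResponse>() {
    @Override
    public void onResponse(SearchTemplateResponse response) {
        // called when the search template request completes successfully
    }

    @Override
    public void onFailure(Exception e) {
        // called when the whole SearchTemplateRequest fails
    }
};
--------------------------------------------------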
+ +==== Search Template Response + +For a standard search template request, the response contains a `SearchResponse` object +with the result of executing the search: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[search-template-response] +-------------------------------------------------- + +If `simulate` was set to `true` in the request, then the response +will contain the rendered search source instead of a `SearchResponse`: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SearchDocumentationIT.java[render-search-template-response] +-------------------------------------------------- +<1> The rendered source in bytes, in our example `{"query": { "match" : { "title" : "elasticsearch" }}, "size" : 5}`. diff --git a/docs/java-rest/high-level/snapshot/create_repository.asciidoc b/docs/java-rest/high-level/snapshot/create_repository.asciidoc new file mode 100644 index 0000000000000..5c54529209720 --- /dev/null +++ b/docs/java-rest/high-level/snapshot/create_repository.asciidoc @@ -0,0 +1,139 @@ +[[java-rest-high-snapshot-create-repository]] +=== Snapshot Create RepositoryAPI + +The Snapshot Create RepositoryAPI allows to register a snapshot repository. + +[[java-rest-high-snapshot-create-repository-request]] +==== Snapshot Create RepositoryRequest + +A `PutRepositoryRequest`: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-request] +-------------------------------------------------- + +==== Repository Settings +Settings requirements will differ based on the repository backend chosen. 
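For example, a shared file system (`fs`) repository needs at least a `location`, while cloud repositories take credentials and bucket names. The sketch below shows one way the settings might be built for an `fs` repository; the repository name, location, and `compress` flag are illustrative values, not requirements of this API.

[source,java]
--------------------------------------------------
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
import org.elasticsearch.common.settings.Settings;

// Sketch: register a shared file system repository.
PutRepositoryRequest request = new PutRepositoryRequest();
request.name("my_backup");                              // repository name
request.type("fs");                                     // shared file system repository
request.settings(Settings.builder()
        .put("location", "/mount/backups/my_backup")    // required by the fs repository type
        .put("compress", true)                          // optional: compress metadata files
        .build());
--------------------------------------------------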
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-request-repository-settings] +-------------------------------------------------- +<1> Sets the repository settings + +==== Providing the Settings +The settings to be applied can be provided in different ways: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-create-settings] +-------------------------------------------------- +<1> Settings provided as `Settings` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-settings-builder] +-------------------------------------------------- +<1> Settings provided as `Settings.Builder` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-settings-source] +-------------------------------------------------- +<1> Settings provided as `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-settings-map] +-------------------------------------------------- +<1> Settings provided as a `Map` + +==== Required Arguments +The following arguments must be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-request-name] +-------------------------------------------------- +<1> The name of the repository + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-request-type] +-------------------------------------------------- +<1> The type of the repository + +==== Optional Arguments +The following arguments can optionally be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-request-timeout] +-------------------------------------------------- +<1> Timeout to wait for the all the nodes to acknowledge the settings were applied +as a `TimeValue` +<2> Timeout to wait for the all the nodes to acknowledge the settings were applied +as a `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-request-masterTimeout] +-------------------------------------------------- +<1> Timeout to connect to the master node as a `TimeValue` +<2> Timeout to connect to the master node as a `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-request-verify] +-------------------------------------------------- +<1> Verify after creation as a `Boolean` + +[[java-rest-high-snapshot-create-repository-sync]] +==== Synchronous Execution + 
+["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-execute] +-------------------------------------------------- + +[[java-rest-high-snapshot-create-repository-async]] +==== Asynchronous Execution + +The asynchronous execution of a repository put settings requires both the +`PutRepositoryRequest` instance and an `ActionListener` instance to be +passed to the asynchronous method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-execute-async] +-------------------------------------------------- +<1> The `PutRepositoryRequest` to execute and the `ActionListener` +to use when the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `PutRepositoryResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of a failure. The raised exception is provided as an argument + +[[java-rest-high-snapshot-create-repository-response]] +==== Snapshot Create RepositoryResponse + +The returned `PutRepositoryResponse` allows to retrieve information about the +executed operation as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-response] +-------------------------------------------------- +<1> Indicates the node has acknowledged the request diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc index 2dee4643e73eb..b00047359a5d7 100644 --- a/docs/java-rest/high-level/supported-apis.asciidoc +++ b/docs/java-rest/high-level/supported-apis.asciidoc @@ -31,6 +31,7 @@ The Java High Level REST Client supports the following Search APIs: * <> * <> * <> +* <> * <> * <> * <> @@ -38,6 +39,7 @@ The Java High Level REST Client supports the following Search APIs: include::search/search.asciidoc[] include::search/scroll.asciidoc[] include::search/multi-search.asciidoc[] +include::search/search-template.asciidoc[] include::search/field-caps.asciidoc[] include::search/rank-eval.asciidoc[] @@ -102,8 +104,10 @@ include::indices/put_template.asciidoc[] The Java High Level REST Client supports the following Cluster APIs: * <> +* <> include::cluster/put_settings.asciidoc[] +include::cluster/list_tasks.asciidoc[] == Snapshot APIs @@ -111,4 +115,5 @@ The Java High Level REST Client supports the following Snapshot APIs: * <> -include::snapshot/get_repository.asciidoc[] \ No newline at end of file +include::snapshot/get_repository.asciidoc[] +include::snapshot/create_repository.asciidoc[] diff --git a/docs/plugins/repository-gcs.asciidoc b/docs/plugins/repository-gcs.asciidoc index a51200fb7fef0..8cf2bc0a73c92 100644 --- a/docs/plugins/repository-gcs.asciidoc +++ 
b/docs/plugins/repository-gcs.asciidoc @@ -84,11 +84,7 @@ A service account file looks like this: "private_key_id": "...", "private_key": "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n", "client_email": "service-account-for-your-repository@your-project-id.iam.gserviceaccount.com", - "client_id": "...", - "auth_uri": "https://accounts.google.com/o/oauth2/auth", - "token_uri": "https://accounts.google.com/o/oauth2/token", - "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", - "client_x509_cert_url": "..." + "client_id": "..." } ---- // NOTCONSOLE @@ -178,6 +174,12 @@ are marked as `Secure`. a custom name can be useful to authenticate your cluster when requests statistics are logged in the Google Cloud Platform. Default to `repository-gcs` +`project_id`:: + + The Google Cloud project id. This will be automatically infered from the credentials file but + can be specified explicitly. For example, it can be used to switch between projects when the + same credentials are usable for both the production and the development projects. + [[repository-gcs-repository]] ==== Repository Settings diff --git a/docs/reference/aggregations/pipeline.asciidoc b/docs/reference/aggregations/pipeline.asciidoc index bd1b0284a84fb..37c1c357007b0 100644 --- a/docs/reference/aggregations/pipeline.asciidoc +++ b/docs/reference/aggregations/pipeline.asciidoc @@ -72,6 +72,7 @@ POST /_search } -------------------------------------------------- // CONSOLE +// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.] <1> The metric is called `"the_sum"` <2> The `buckets_path` refers to the metric via a relative path `"the_sum"` @@ -136,6 +137,7 @@ POST /_search } -------------------------------------------------- // CONSOLE +// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.] <1> By using `_count` instead of a metric name, we can calculate the moving average of document counts in the histogram The `buckets_path` can also use `"_bucket_count"` and path to a multi-bucket aggregation to use the number of buckets @@ -231,6 +233,7 @@ include::pipeline/stats-bucket-aggregation.asciidoc[] include::pipeline/extended-stats-bucket-aggregation.asciidoc[] include::pipeline/percentiles-bucket-aggregation.asciidoc[] include::pipeline/movavg-aggregation.asciidoc[] +include::pipeline/movfn-aggregation.asciidoc[] include::pipeline/cumulative-sum-aggregation.asciidoc[] include::pipeline/bucket-script-aggregation.asciidoc[] include::pipeline/bucket-selector-aggregation.asciidoc[] diff --git a/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc b/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc index db73510216be0..39a8255c90705 100644 --- a/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc @@ -1,6 +1,10 @@ [[search-aggregations-pipeline-movavg-aggregation]] === Moving Average Aggregation +deprecated[6.4.0, The Moving Average aggregation has been deprecated in favor of the more general +<>. The new Moving Function aggregation provides +all the same functionality as the Moving Average aggregation, but also provides more flexibility.] + Given an ordered series of data, the Moving Average aggregation will slide a window across the data and emit the average value of that window. 
For example, given the data `[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]`, we can calculate a simple moving average with windows size of `5` as follows: @@ -74,6 +78,7 @@ POST /_search -------------------------------------------------- // CONSOLE // TEST[setup:sales] +// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.] <1> A `date_histogram` named "my_date_histo" is constructed on the "timestamp" field, with one-day intervals <2> A `sum` metric is used to calculate the sum of a field. This could be any metric (sum, min, max, etc) @@ -180,6 +185,7 @@ POST /_search -------------------------------------------------- // CONSOLE // TEST[setup:sales] +// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.] A `simple` model has no special settings to configure @@ -233,6 +239,7 @@ POST /_search -------------------------------------------------- // CONSOLE // TEST[setup:sales] +// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.] A `linear` model has no special settings to configure @@ -295,7 +302,7 @@ POST /_search -------------------------------------------------- // CONSOLE // TEST[setup:sales] - +// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.] [[single_0.2alpha]] .EWMA with window of size 10, alpha = 0.2 @@ -355,6 +362,7 @@ POST /_search -------------------------------------------------- // CONSOLE // TEST[setup:sales] +// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.] In practice, the `alpha` value behaves very similarly in `holt` as `ewma`: small values produce more smoothing and more lag, while larger values produce closer tracking and less lag. The value of `beta` is often difficult @@ -446,7 +454,7 @@ POST /_search -------------------------------------------------- // CONSOLE // TEST[setup:sales] - +// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.] [[holt_winters_add]] .Holt-Winters moving average with window of size 120, alpha = 0.5, beta = 0.7, gamma = 0.3, period = 30 @@ -508,6 +516,7 @@ POST /_search -------------------------------------------------- // CONSOLE // TEST[setup:sales] +// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.] ==== Prediction @@ -550,6 +559,7 @@ POST /_search -------------------------------------------------- // CONSOLE // TEST[setup:sales] +// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.] The `simple`, `linear` and `ewma` models all produce "flat" predictions: they essentially converge on the mean of the last value in the series, producing a flat: @@ -631,6 +641,7 @@ POST /_search -------------------------------------------------- // CONSOLE // TEST[setup:sales] +// TEST[warning:The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.] 
<1> Minimization is enabled with the `minimize` parameter diff --git a/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc b/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc new file mode 100644 index 0000000000000..b05c56b880560 --- /dev/null +++ b/docs/reference/aggregations/pipeline/movfn-aggregation.asciidoc @@ -0,0 +1,633 @@ +[[search-aggregations-pipeline-movfn-aggregation]] +=== Moving Function Aggregation + +Given an ordered series of data, the Moving Function aggregation will slide a window across the data and allow the user to specify a custom +script that is executed on each window of data. For convenience, a number of common functions are predefined such as min/max, moving averages, +etc. + +This is conceptually very similar to the <> pipeline aggregation, except +it provides more functionality. +==== Syntax + +A `moving_fn` aggregation looks like this in isolation: + +[source,js] +-------------------------------------------------- +{ + "moving_fn": { + "buckets_path": "the_sum", + "window": 10, + "script": "MovingFunctions.min(values)" + } +} +-------------------------------------------------- +// NOTCONSOLE + +.`moving_avg` Parameters +|=== +|Parameter Name |Description |Required |Default Value +|`buckets_path` |Path to the metric of interest (see <> for more details |Required | +|`window` |The size of window to "slide" across the histogram. |Required | +|`script` |The script that should be executed on each window of data |Required | +|=== + +`moving_fn` aggregations must be embedded inside of a `histogram` or `date_histogram` aggregation. They can be +embedded like any other metric aggregation: + +[source,js] +-------------------------------------------------- +POST /_search +{ + "size": 0, + "aggs": { + "my_date_histo":{ <1> + "date_histogram":{ + "field":"date", + "interval":"1M" + }, + "aggs":{ + "the_sum":{ + "sum":{ "field": "price" } <2> + }, + "the_movfn": { + "moving_fn": { + "buckets_path": "the_sum", <3> + "window": 10, + "script": "MovingFunctions.unweightedAvg(values)" + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sales] + +<1> A `date_histogram` named "my_date_histo" is constructed on the "timestamp" field, with one-day intervals +<2> A `sum` metric is used to calculate the sum of a field. This could be any numeric metric (sum, min, max, etc) +<3> Finally, we specify a `moving_fn` aggregation which uses "the_sum" metric as its input. + +Moving averages are built by first specifying a `histogram` or `date_histogram` over a field. You can then optionally +add numeric metrics, such as a `sum`, inside of that histogram. Finally, the `moving_fn` is embedded inside the histogram. +The `buckets_path` parameter is then used to "point" at one of the sibling metrics inside of the histogram (see +<> for a description of the syntax for `buckets_path`. 
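Conceptually, the aggregation walks the histogram buckets and, for each one, hands the metric values of the preceding `window` buckets to the script as `values`. The sketch below illustrates that windowing using the bucket sums from the example response shown next; it is an illustration of the semantics, not the actual implementation.

[source,java]
--------------------------------------------------
import java.util.Arrays;

// "the_sum" per bucket, taken from the example response below.
double[] metrics = { 550.0, 60.0, 375.0 };
int window = 10;
for (int i = 0; i < metrics.length; i++) {
    int from = Math.max(0, i - window);
    double[] values = Arrays.copyOfRange(metrics, from, i);  // buckets before the current one
    // the script, e.g. MovingFunctions.unweightedAvg(values), runs on this window;
    // for the first bucket the window is empty, which is why its value is null in the response
}
--------------------------------------------------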
+ +An example response from the above aggregation may look like: + +[source,js] +-------------------------------------------------- +{ + "took": 11, + "timed_out": false, + "_shards": ..., + "hits": ..., + "aggregations": { + "my_date_histo": { + "buckets": [ + { + "key_as_string": "2015/01/01 00:00:00", + "key": 1420070400000, + "doc_count": 3, + "the_sum": { + "value": 550.0 + }, + "the_movfn": { + "value": null + } + }, + { + "key_as_string": "2015/02/01 00:00:00", + "key": 1422748800000, + "doc_count": 2, + "the_sum": { + "value": 60.0 + }, + "the_movfn": { + "value": 550.0 + } + }, + { + "key_as_string": "2015/03/01 00:00:00", + "key": 1425168000000, + "doc_count": 2, + "the_sum": { + "value": 375.0 + }, + "the_movfn": { + "value": 305.0 + } + } + ] + } + } +} +-------------------------------------------------- +// TESTRESPONSE[s/"took": 11/"took": $body.took/] +// TESTRESPONSE[s/"_shards": \.\.\./"_shards": $body._shards/] +// TESTRESPONSE[s/"hits": \.\.\./"hits": $body.hits/] + + +==== Custom user scripting + +The Moving Function aggregation allows the user to specify any arbitrary script to define custom logic. The script is invoked each time a +new window of data is collected. These values are provided to the script in the `values` variable. The script should then perform some +kind of calculation and emit a single `double` as the result. Emitting `null` is not permitted, although `NaN` and +/- `Inf` are allowed. + +For example, this script will simply return the first value from the window, or `NaN` if no values are available: + +[source,js] +-------------------------------------------------- +POST /_search +{ + "size": 0, + "aggs": { + "my_date_histo":{ + "date_histogram":{ + "field":"date", + "interval":"1M" + }, + "aggs":{ + "the_sum":{ + "sum":{ "field": "price" } + }, + "the_movavg": { + "moving_fn": { + "buckets_path": "the_sum", + "window": 10, + "script": "return values.length > 0 ? values[0] : Double.NaN" + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sales] + +==== Pre-built Functions + +For convenience, a number of functions have been prebuilt and are available inside the `moving_fn` script context: + +- `max()` +- `min()` +- `sum()` +- `stdDev()` +- `unweightedAvg()` +- `linearWeightedAvg()` +- `ewma()` +- `holt()` +- `holtWinters()` + +The functions are available from the `MovingFunctions` namespace. E.g. `MovingFunctions.max()` + +===== max Function + +This function accepts a collection of doubles and returns the maximum value in that window. `null` and `NaN` values are ignored; the maximum +is only calculated over the real values. If the window is empty, or all values are `null`/`NaN`, `NaN` is returned as the result. + +.`max(double[] values)` Parameters +|=== +|Parameter Name |Description +|`values` |The window of values to find the maximum +|=== + +[source,js] +-------------------------------------------------- +POST /_search +{ + "size": 0, + "aggs": { + "my_date_histo":{ + "date_histogram":{ + "field":"date", + "interval":"1M" + }, + "aggs":{ + "the_sum":{ + "sum":{ "field": "price" } + }, + "the_moving_max": { + "moving_fn": { + "buckets_path": "the_sum", + "window": 10, + "script": "MovingFunctions.max(values)" + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sales] + +===== min Function + +This function accepts a collection of doubles and returns the minimum value in that window. 
`null` and `NaN` values are ignored; the minimum +is only calculated over the real values. If the window is empty, or all values are `null`/`NaN`, `NaN` is returned as the result. + +.`min(double[] values)` Parameters +|=== +|Parameter Name |Description +|`values` |The window of values to find the minimum +|=== + +[source,js] +-------------------------------------------------- +POST /_search +{ + "size": 0, + "aggs": { + "my_date_histo":{ + "date_histogram":{ + "field":"date", + "interval":"1M" + }, + "aggs":{ + "the_sum":{ + "sum":{ "field": "price" } + }, + "the_moving_min": { + "moving_fn": { + "buckets_path": "the_sum", + "window": 10, + "script": "MovingFunctions.min(values)" + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sales] + +===== sum Function + +This function accepts a collection of doubles and returns the sum of the values in that window. `null` and `NaN` values are ignored; +the sum is only calculated over the real values. If the window is empty, or all values are `null`/`NaN`, `0.0` is returned as the result. + +.`sum(double[] values)` Parameters +|=== +|Parameter Name |Description +|`values` |The window of values to find the sum of +|=== + +[source,js] +-------------------------------------------------- +POST /_search +{ + "size": 0, + "aggs": { + "my_date_histo":{ + "date_histogram":{ + "field":"date", + "interval":"1M" + }, + "aggs":{ + "the_sum":{ + "sum":{ "field": "price" } + }, + "the_moving_sum": { + "moving_fn": { + "buckets_path": "the_sum", + "window": 10, + "script": "MovingFunctions.sum(values)" + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sales] + +===== stdDev Function + +This function accepts a collection of doubles and and average, then returns the standard deviation of the values in that window. +`null` and `NaN` values are ignored; the sum is only calculated over the real values. If the window is empty, or all values are +`null`/`NaN`, `0.0` is returned as the result. + +.`stdDev(double[] values)` Parameters +|=== +|Parameter Name |Description +|`values` |The window of values to find the standard deviation of +|`avg` |The average of the window +|=== + +[source,js] +-------------------------------------------------- +POST /_search +{ + "size": 0, + "aggs": { + "my_date_histo":{ + "date_histogram":{ + "field":"date", + "interval":"1M" + }, + "aggs":{ + "the_sum":{ + "sum":{ "field": "price" } + }, + "the_moving_sum": { + "moving_fn": { + "buckets_path": "the_sum", + "window": 10, + "script": "MovingFunctions.stdDev(values, MovingFunctions.unweightedAvg(values))" + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sales] + +The `avg` parameter must be provided to the standard deviation function because different styles of averages can be computed on the window +(simple, linearly weighted, etc). The various moving averages that are detailed below can be used to calculate the average for the +standard deviation function. + +===== unweightedAvg Function + +The `unweightedAvg` function calculates the sum of all values in the window, then divides by the size of the window. It is effectively +a simple arithmetic mean of the window. The simple moving average does not perform any time-dependent weighting, which means +the values from a `simple` moving average tend to "lag" behind the real data. + +`null` and `NaN` values are ignored; the average is only calculated over the real values. 
If the window is empty, or all values are +`null`/`NaN`, `NaN` is returned as the result. This means that the count used in the average calculation is count of non-`null`,non-`NaN` +values. + +.`unweightedAvg(double[] values)` Parameters +|=== +|Parameter Name |Description +|`values` |The window of values to find the sum of +|=== + +[source,js] +-------------------------------------------------- +POST /_search +{ + "size": 0, + "aggs": { + "my_date_histo":{ + "date_histogram":{ + "field":"date", + "interval":"1M" + }, + "aggs":{ + "the_sum":{ + "sum":{ "field": "price" } + }, + "the_movavg": { + "moving_fn": { + "buckets_path": "the_sum", + "window": 10, + "script": "MovingFunctions.unweightedAvg(values)" + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sales] + +==== linearWeightedAvg Function + +The `linearWeightedAvg` function assigns a linear weighting to points in the series, such that "older" datapoints (e.g. those at +the beginning of the window) contribute a linearly less amount to the total average. The linear weighting helps reduce +the "lag" behind the data's mean, since older points have less influence. + +If the window is empty, or all values are `null`/`NaN`, `NaN` is returned as the result. + +.`linearWeightedAvg(double[] values)` Parameters +|=== +|Parameter Name |Description +|`values` |The window of values to find the sum of +|=== + +[source,js] +-------------------------------------------------- +POST /_search +{ + "size": 0, + "aggs": { + "my_date_histo":{ + "date_histogram":{ + "field":"date", + "interval":"1M" + }, + "aggs":{ + "the_sum":{ + "sum":{ "field": "price" } + }, + "the_movavg": { + "moving_fn": { + "buckets_path": "the_sum", + "window": 10, + "script": "MovingFunctions.linearWeightedAvg(values)" + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sales] + +==== ewma Function + +The `ewma` function (aka "single-exponential") is similar to the `linearMovAvg` function, +except older data-points become exponentially less important, +rather than linearly less important. The speed at which the importance decays can be controlled with an `alpha` +setting. Small values make the weight decay slowly, which provides greater smoothing and takes into account a larger +portion of the window. Larger valuers make the weight decay quickly, which reduces the impact of older values on the +moving average. This tends to make the moving average track the data more closely but with less smoothing. + +`null` and `NaN` values are ignored; the average is only calculated over the real values. If the window is empty, or all values are +`null`/`NaN`, `NaN` is returned as the result. This means that the count used in the average calculation is count of non-`null`,non-`NaN` +values. 
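The weighting can be pictured with a small sketch: the running average is updated once per real value, so older values decay by a factor of `1 - alpha` at each step. This is an illustration of the behaviour described above, not the actual `MovingFunctions` implementation.

[source,java]
--------------------------------------------------
// Sketch of single-exponential weighting: NaN values are skipped, and NaN is
// returned when the window holds no real values.
static double ewma(double[] values, double alpha) {
    double avg = Double.NaN;
    for (double v : values) {
        if (Double.isNaN(v) == false) {
            avg = Double.isNaN(avg) ? v : alpha * v + (1 - alpha) * avg;
        }
    }
    return avg;
}
--------------------------------------------------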
+ +.`ewma(double[] values, double alpha)` Parameters +|=== +|Parameter Name |Description +|`values` |The window of values to find the sum of +|`alpha` |Exponential decay +|=== + +[source,js] +-------------------------------------------------- +POST /_search +{ + "size": 0, + "aggs": { + "my_date_histo":{ + "date_histogram":{ + "field":"date", + "interval":"1M" + }, + "aggs":{ + "the_sum":{ + "sum":{ "field": "price" } + }, + "the_movavg": { + "moving_fn": { + "buckets_path": "the_sum", + "window": 10, + "script": "MovingFunctions.ewma(values, 0.3)" + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sales] + + +==== holt Function + +The `holt` function (aka "double exponential") incorporates a second exponential term which +tracks the data's trend. Single exponential does not perform well when the data has an underlying linear trend. The +double exponential model calculates two values internally: a "level" and a "trend". + +The level calculation is similar to `ewma`, and is an exponentially weighted view of the data. The difference is +that the previously smoothed value is used instead of the raw value, which allows it to stay close to the original series. +The trend calculation looks at the difference between the current and last value (e.g. the slope, or trend, of the +smoothed data). The trend value is also exponentially weighted. + +Values are produced by multiplying the level and trend components. + +`null` and `NaN` values are ignored; the average is only calculated over the real values. If the window is empty, or all values are +`null`/`NaN`, `NaN` is returned as the result. This means that the count used in the average calculation is count of non-`null`,non-`NaN` +values. + +.`holt(double[] values, double alpha)` Parameters +|=== +|Parameter Name |Description +|`values` |The window of values to find the sum of +|`alpha` |Level decay value +|`beta` |Trend decay value +|=== + +[source,js] +-------------------------------------------------- +POST /_search +{ + "size": 0, + "aggs": { + "my_date_histo":{ + "date_histogram":{ + "field":"date", + "interval":"1M" + }, + "aggs":{ + "the_sum":{ + "sum":{ "field": "price" } + }, + "the_movavg": { + "moving_fn": { + "buckets_path": "the_sum", + "window": 10, + "script": "MovingFunctions.holt(values, 0.3, 0.1)" + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sales] + +In practice, the `alpha` value behaves very similarly in `holtMovAvg` as `ewmaMovAvg`: small values produce more smoothing +and more lag, while larger values produce closer tracking and less lag. The value of `beta` is often difficult +to see. Small values emphasize long-term trends (such as a constant linear trend in the whole series), while larger +values emphasize short-term trends. + +==== holtWinters Function + +The `holtWinters` function (aka "triple exponential") incorporates a third exponential term which +tracks the seasonal aspect of your data. This aggregation therefore smooths based on three components: "level", "trend" +and "seasonality". + +The level and trend calculation is identical to `holt` The seasonal calculation looks at the difference between +the current point, and the point one period earlier. + +Holt-Winters requires a little more handholding than the other moving averages. You need to specify the "periodicity" +of your data: e.g. if your data has cyclic trends every 7 days, you would set `period = 7`. 
Similarly if there was +a monthly trend, you would set it to `30`. There is currently no periodicity detection, although that is planned +for future enhancements. + +`null` and `NaN` values are ignored; the average is only calculated over the real values. If the window is empty, or all values are +`null`/`NaN`, `NaN` is returned as the result. This means that the count used in the average calculation is count of non-`null`,non-`NaN` +values. + +.`holtWinters(double[] values, double alpha)` Parameters +|=== +|Parameter Name |Description +|`values` |The window of values to find the sum of +|`alpha` |Level decay value +|`beta` |Trend decay value +|`gamma` |Seasonality decay value +|`period` |The periodicity of the data +|`multiplicative` |True if you wish to use multiplicative holt-winters, false to use additive +|=== + +[source,js] +-------------------------------------------------- +POST /_search +{ + "size": 0, + "aggs": { + "my_date_histo":{ + "date_histogram":{ + "field":"date", + "interval":"1M" + }, + "aggs":{ + "the_sum":{ + "sum":{ "field": "price" } + }, + "the_movavg": { + "moving_fn": { + "buckets_path": "the_sum", + "window": 10, + "script": "if (values.length > 5*2) {MovingFunctions.holtWinters(values, 0.3, 0.1, 0.1, 5, false)}" + } + } + } + } + } +} +-------------------------------------------------- +// CONSOLE +// TEST[setup:sales] + +[WARNING] +====== +Multiplicative Holt-Winters works by dividing each data point by the seasonal value. This is problematic if any of +your data is zero, or if there are gaps in the data (since this results in a divid-by-zero). To combat this, the +`mult` Holt-Winters pads all values by a very small amount (1*10^-10^) so that all values are non-zero. This affects +the result, but only minimally. If your data is non-zero, or you prefer to see `NaN` when zero's are encountered, +you can disable this behavior with `pad: false` +====== + +===== "Cold Start" + +Unfortunately, due to the nature of Holt-Winters, it requires two periods of data to "bootstrap" the algorithm. This +means that your `window` must always be *at least* twice the size of your period. An exception will be thrown if it +isn't. It also means that Holt-Winters will not emit a value for the first `2 * period` buckets; the current algorithm +does not backcast. + +You'll notice in the above example we have an `if ()` statement checking the size of values. This is checking to make sure +we have two periods worth of data (`5 * 2`, where 5 is the period specified in the `holtWintersMovAvg` function) before calling +the holt-winters function. diff --git a/docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc b/docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc index 53c7d913ad2f1..cc873a4fe89ff 100644 --- a/docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/fingerprint-analyzer.asciidoc @@ -9,20 +9,6 @@ Input text is lowercased, normalized to remove extended characters, sorted, deduplicated and concatenated into a single token. If a stopword list is configured, stop words will also be removed. -[float] -=== Definition - -It consists of: - -Tokenizer:: -* <> - -Token Filters (in order):: -1. <> -2. <> -3. <> (disabled by default) -4. 
<> - [float] === Example output @@ -149,3 +135,46 @@ The above example produces the following term: --------------------------- [ consistent godel said sentence yes ] --------------------------- + +[float] +=== Definition + +The `fingerprint` tokenizer consists of: + +Tokenizer:: +* <> + +Token Filters (in order):: +* <> +* <> +* <> (disabled by default) +* <> + +If you need to customize the `fingerprint` analyzer beyond the configuration +parameters then you need to recreate it as a `custom` analyzer and modify +it, usually by adding token filters. This would recreate the built-in +`fingerprint` analyzer and you can use it as a starting point for further +customization: + +[source,js] +---------------------------------------------------- +PUT /fingerprint_example +{ + "settings": { + "analysis": { + "analyzer": { + "rebuilt_fingerprint": { + "tokenizer": "standard", + "filter": [ + "lowercase", + "asciifolding", + "fingerprint" + ] + } + } + } + } +} +---------------------------------------------------- +// CONSOLE +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: fingerprint_example, first: fingerprint, second: rebuilt_fingerprint}\nendyaml\n/] diff --git a/docs/reference/analysis/analyzers/keyword-analyzer.asciidoc b/docs/reference/analysis/analyzers/keyword-analyzer.asciidoc index cc94f3b757e37..954b514ced605 100644 --- a/docs/reference/analysis/analyzers/keyword-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/keyword-analyzer.asciidoc @@ -4,14 +4,6 @@ The `keyword` analyzer is a ``noop'' analyzer which returns the entire input string as a single token. -[float] -=== Definition - -It consists of: - -Tokenizer:: -* <> - [float] === Example output @@ -57,3 +49,40 @@ The above sentence would produce the following single term: === Configuration The `keyword` analyzer is not configurable. + +[float] +=== Definition + +The `keyword` analyzer consists of: + +Tokenizer:: +* <> + +If you need to customize the `keyword` analyzer then you need to +recreate it as a `custom` analyzer and modify it, usually by adding +token filters. Usually, you should prefer the +<> when you want strings that are not split +into tokens, but just in case you need it, this would recreate the +built-in `keyword` analyzer and you can use it as a starting point +for further customization: + +[source,js] +---------------------------------------------------- +PUT /keyword_example +{ + "settings": { + "analysis": { + "analyzer": { + "rebuilt_keyword": { + "tokenizer": "keyword", + "filter": [ <1> + ] + } + } + } + } +} +---------------------------------------------------- +// CONSOLE +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: keyword_example, first: keyword, second: rebuilt_keyword}\nendyaml\n/] +<1> You'd add any token filters here. 
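
As a quick sanity check, not part of the documentation change above, the rebuilt analyzer can be compared against the built-in one with the `_analyze` API. This assumes the `keyword_example` index created in the snippet above already exists:

[source,js]
----------------------------------------------------
POST /keyword_example/_analyze
{
  "analyzer": "rebuilt_keyword",
  "text": "New York"
}
----------------------------------------------------

Both `rebuilt_keyword` and the built-in `keyword` analyzer should return the whole input as the single term `[ New York ]`.
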
diff --git a/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc b/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc index 64ab3999ef9a9..027f37280a67d 100644 --- a/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/pattern-analyzer.asciidoc @@ -19,19 +19,6 @@ Read more about http://www.regular-expressions.info/catastrophic.html[pathologic ======================================== - -[float] -=== Definition - -It consists of: - -Tokenizer:: -* <> - -Token Filters:: -* <> -* <> (disabled by default) - [float] === Example output @@ -378,3 +365,51 @@ The regex above is easier to understand as: [\p{L}&&[^\p{Lu}]] # then lower case ) -------------------------------------------------- + +[float] +=== Definition + +The `pattern` anlayzer consists of: + +Tokenizer:: +* <> + +Token Filters:: +* <> +* <> (disabled by default) + +If you need to customize the `pattern` analyzer beyond the configuration +parameters then you need to recreate it as a `custom` analyzer and modify +it, usually by adding token filters. This would recreate the built-in +`pattern` analyzer and you can use it as a starting point for further +customization: + +[source,js] +---------------------------------------------------- +PUT /pattern_example +{ + "settings": { + "analysis": { + "tokenizer": { + "split_on_non_word": { + "type": "pattern", + "pattern": "\\W+" <1> + } + }, + "analyzer": { + "rebuilt_pattern": { + "tokenizer": "split_on_non_word", + "filter": [ + "lowercase" <2> + ] + } + } + } + } +} +---------------------------------------------------- +// CONSOLE +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: pattern_example, first: pattern, second: rebuilt_pattern}\nendyaml\n/] +<1> The default pattern is `\W+` which splits on non-word characters +and this is where you'd change it. +<2> You'd add other token filters after `lowercase`. diff --git a/docs/reference/analysis/analyzers/simple-analyzer.asciidoc b/docs/reference/analysis/analyzers/simple-analyzer.asciidoc index a57c30d8dd622..d82655d9bd8e1 100644 --- a/docs/reference/analysis/analyzers/simple-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/simple-analyzer.asciidoc @@ -4,14 +4,6 @@ The `simple` analyzer breaks text into terms whenever it encounters a character which is not a letter. All terms are lower cased. -[float] -=== Definition - -It consists of: - -Tokenizer:: -* <> - [float] === Example output @@ -127,3 +119,37 @@ The above sentence would produce the following terms: === Configuration The `simple` analyzer is not configurable. + +[float] +=== Definition + +The `simple` analzyer consists of: + +Tokenizer:: +* <> + +If you need to customize the `simple` analyzer then you need to recreate +it as a `custom` analyzer and modify it, usually by adding token filters. +This would recreate the built-in `simple` analyzer and you can use it as +a starting point for further customization: + +[source,js] +---------------------------------------------------- +PUT /simple_example +{ + "settings": { + "analysis": { + "analyzer": { + "rebuilt_simple": { + "tokenizer": "lowercase", + "filter": [ <1> + ] + } + } + } + } +} +---------------------------------------------------- +// CONSOLE +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: simple_example, first: simple, second: rebuilt_simple}\nendyaml\n/] +<1> You'd add any token filters here. 
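
The empty `filter` array in the `rebuilt_simple` example above is where extra token filters would be slotted in. As an illustrative sketch (the index and analyzer names below are made up for this example and are not part of the change), a `simple`-style analyzer that additionally folds accented characters to ASCII could look like this:

[source,js]
----------------------------------------------------
PUT /simple_folding_example
{
  "settings": {
    "analysis": {
      "analyzer": {
        "folded_simple": { <1>
          "tokenizer": "lowercase",
          "filter": [
            "asciifolding" <2>
          ]
        }
      }
    }
  }
}
----------------------------------------------------
<1> Hypothetical analyzer name, used only for this sketch.
<2> The built-in `asciifolding` token filter; with it, text such as `Déjà Vu` is
tokenized to `[ deja, vu ]` rather than `[ déjà, vu ]`.

This mirrors how the rebuilt `fingerprint` analyzer earlier in this change combines `lowercase` with `asciifolding` before its own filter.
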
diff --git a/docs/reference/analysis/analyzers/standard-analyzer.asciidoc b/docs/reference/analysis/analyzers/standard-analyzer.asciidoc index eacbb1c3cad99..20aa072066b5f 100644 --- a/docs/reference/analysis/analyzers/standard-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/standard-analyzer.asciidoc @@ -7,19 +7,6 @@ Segmentation algorithm, as specified in http://unicode.org/reports/tr29/[Unicode Standard Annex #29]) and works well for most languages. -[float] -=== Definition - -It consists of: - -Tokenizer:: -* <> - -Token Filters:: -* <> -* <> -* <> (disabled by default) - [float] === Example output @@ -276,3 +263,44 @@ The above example produces the following terms: --------------------------- [ 2, quick, brown, foxes, jumpe, d, over, lazy, dog's, bone ] --------------------------- + +[float] +=== Definition + +The `standard` analyzer consists of: + +Tokenizer:: +* <> + +Token Filters:: +* <> +* <> +* <> (disabled by default) + +If you need to customize the `standard` analyzer beyond the configuration +parameters then you need to recreate it as a `custom` analyzer and modify +it, usually by adding token filters. This would recreate the built-in +`standard` analyzer and you can use it as a starting point: + +[source,js] +---------------------------------------------------- +PUT /standard_example +{ + "settings": { + "analysis": { + "analyzer": { + "rebuilt_standard": { + "tokenizer": "standard", + "filter": [ + "standard", + "lowercase" <1> + ] + } + } + } + } +} +---------------------------------------------------- +// CONSOLE +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: standard_example, first: standard, second: rebuilt_standard}\nendyaml\n/] +<1> You'd add any token filters after `lowercase`. diff --git a/docs/reference/analysis/analyzers/stop-analyzer.asciidoc b/docs/reference/analysis/analyzers/stop-analyzer.asciidoc index eacc7e106e799..1b84797d94761 100644 --- a/docs/reference/analysis/analyzers/stop-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/stop-analyzer.asciidoc @@ -5,17 +5,6 @@ The `stop` analyzer is the same as the <> - -Token filters:: -* <> - [float] === Example output @@ -239,3 +228,50 @@ The above example produces the following terms: --------------------------- [ quick, brown, foxes, jumped, lazy, dog, s, bone ] --------------------------- + +[float] +=== Definition + +It consists of: + +Tokenizer:: +* <> + +Token filters:: +* <> + +If you need to customize the `stop` analyzer beyond the configuration +parameters then you need to recreate it as a `custom` analyzer and modify +it, usually by adding token filters. This would recreate the built-in +`stop` analyzer and you can use it as a starting point for further +customization: + +[source,js] +---------------------------------------------------- +PUT /stop_example +{ + "settings": { + "analysis": { + "filter": { + "english_stop": { + "type": "stop", + "stopwords": "_english_" <1> + } + }, + "analyzer": { + "rebuilt_stop": { + "tokenizer": "lowercase", + "filter": [ + "english_stop" <2> + ] + } + } + } + } +} +---------------------------------------------------- +// CONSOLE +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: stop_example, first: stop, second: rebuilt_stop}\nendyaml\n/] +<1> The default stopwords can be overridden with the `stopwords` + or `stopwords_path` parameters. +<2> You'd add any token filters after `english_stop`. 
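
To make the first callout above concrete, here is a minimal sketch (again with made-up index, filter, and analyzer names, not part of the change) of the same rebuilt analyzer using an explicit stopword list in place of the predefined `_english_` set:

[source,js]
----------------------------------------------------
PUT /stop_custom_example
{
  "settings": {
    "analysis": {
      "filter": {
        "my_custom_stop": {
          "type": "stop",
          "stopwords": [ "and", "is", "the" ] <1>
        }
      },
      "analyzer": {
        "rebuilt_stop_custom": {
          "tokenizer": "lowercase",
          "filter": [
            "my_custom_stop"
          ]
        }
      }
    }
  }
}
----------------------------------------------------
<1> An explicit list replaces `_english_`; alternatively, `stopwords_path` can
point to a file containing one stop word per line.

With this configuration, only `and`, `is`, and `the` are removed from the token stream; all other terms pass through unchanged.
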
diff --git a/docs/reference/analysis/analyzers/whitespace-analyzer.asciidoc b/docs/reference/analysis/analyzers/whitespace-analyzer.asciidoc index f95e5c6e4ab65..31ba8d9ce8f24 100644 --- a/docs/reference/analysis/analyzers/whitespace-analyzer.asciidoc +++ b/docs/reference/analysis/analyzers/whitespace-analyzer.asciidoc @@ -4,14 +4,6 @@ The `whitespace` analyzer breaks text into terms whenever it encounters a whitespace character. -[float] -=== Definition - -It consists of: - -Tokenizer:: -* <> - [float] === Example output @@ -120,3 +112,37 @@ The above sentence would produce the following terms: === Configuration The `whitespace` analyzer is not configurable. + +[float] +=== Definition + +It consists of: + +Tokenizer:: +* <> + +If you need to customize the `whitespace` analyzer then you need to +recreate it as a `custom` analyzer and modify it, usually by adding +token filters. This would recreate the built-in `whitespace` analyzer +and you can use it as a starting point for further customization: + +[source,js] +---------------------------------------------------- +PUT /whitespace_example +{ + "settings": { + "analysis": { + "analyzer": { + "rebuilt_whitespace": { + "tokenizer": "whitespace", + "filter": [ <1> + ] + } + } + } + } +} +---------------------------------------------------- +// CONSOLE +// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: whitespace_example, first: whitespace, second: rebuilt_whitespace}\nendyaml\n/] +<1> You'd add any token filters here. diff --git a/docs/reference/index-modules/translog.asciidoc b/docs/reference/index-modules/translog.asciidoc index b1eb36e346d9f..bed19bd5be1df 100644 --- a/docs/reference/index-modules/translog.asciidoc +++ b/docs/reference/index-modules/translog.asciidoc @@ -108,8 +108,8 @@ provide a command-line tool for this, `elasticsearch-translog`. [WARNING] The `elasticsearch-translog` tool should *not* be run while Elasticsearch is -running, and you will permanently lose the documents that were contained only in -the translog! +running. If you attempt to run this tool while Elasticsearch is running, you +will permanently lose the documents that were contained only in the translog! In order to run the `elasticsearch-translog` tool, specify the `truncate` subcommand as well as the directory for the corrupted translog with the `-d` diff --git a/docs/reference/index-shared4.asciidoc b/docs/reference/index-shared4.asciidoc index 5e6ebc8a5a20c..3dc9e4f5e07cf 100644 --- a/docs/reference/index-shared4.asciidoc +++ b/docs/reference/index-shared4.asciidoc @@ -5,4 +5,6 @@ include::testing.asciidoc[] include::glossary.asciidoc[] -include::{docdir}/../CHANGELOG.asciidoc[] \ No newline at end of file +include::release-notes/highlights.asciidoc[] + +include::release-notes.asciidoc[] \ No newline at end of file diff --git a/docs/reference/modules/snapshots.asciidoc b/docs/reference/modules/snapshots.asciidoc index 693d537d732c1..f70857e66c86f 100644 --- a/docs/reference/modules/snapshots.asciidoc +++ b/docs/reference/modules/snapshots.asciidoc @@ -289,6 +289,20 @@ By setting `include_global_state` to false it's possible to prevent the cluster the snapshot. By default, the entire snapshot will fail if one or more indices participating in the snapshot don't have all primary shards available. This behaviour can be changed by setting `partial` to `true`. +Snapshot names can be automatically derived using <>, similarly as when creating +new indices. Note that special characters need to be URI encoded. 
+ +For example, creating a snapshot with the current day in the name, like `snapshot-2018.05.11`, can be achieved with +the following command: +[source,js] +----------------------------------- +# PUT /_snapshot/my_backup/ +PUT /_snapshot/my_backup/%3Csnapshot-%7Bnow%2Fd%7D%3E +----------------------------------- +// CONSOLE +// TEST[continued] + + The index snapshot process is incremental. In the process of making the index snapshot Elasticsearch analyses the list of the index files that are already stored in the repository and copies only files that were created or changed since the last snapshot. That allows multiple snapshots to be preserved in the repository in a compact form. diff --git a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc index a1b427acf2718..21a689703e01e 100644 --- a/docs/reference/query-dsl/geo-bounding-box-query.asciidoc +++ b/docs/reference/query-dsl/geo-bounding-box-query.asciidoc @@ -329,3 +329,16 @@ and will not match any documents for this query. This can be useful when querying multiple indexes which might have different mappings. When set to `false` (the default value) the query will throw an exception if the field is not mapped. + +[float] +==== Notes on Precision + +Geopoints have limited precision and are always rounded down during index time. +During the query time, upper boundaries of the bounding boxes are rounded down, +while lower boundaries are rounded up. As a result, the points along on the +lower bounds (bottom and left edges of the bounding box) might not make it into +the bounding box due to the rounding error. At the same time points alongside +the upper bounds (top and right edges) might be selected by the query even if +they are located slightly outside the edge. The rounding error should be less +than 4.20e-8 degrees on the latitude and less than 8.39e-8 degrees on the +longitude, which translates to less than 1cm error even at the equator. diff --git a/docs/reference/release-notes/highlights-7.0.0.asciidoc b/docs/reference/release-notes/highlights-7.0.0.asciidoc new file mode 100644 index 0000000000000..d01d543c8257e --- /dev/null +++ b/docs/reference/release-notes/highlights-7.0.0.asciidoc @@ -0,0 +1,9 @@ +[[release-highlights-7.0.0]] +== 7.0.0 release highlights +++++ +7.0.0 +++++ + +coming[7.0.0] + +See also <> and <>. diff --git a/docs/reference/release-notes/highlights.asciidoc b/docs/reference/release-notes/highlights.asciidoc new file mode 100644 index 0000000000000..0ab4106c22c1f --- /dev/null +++ b/docs/reference/release-notes/highlights.asciidoc @@ -0,0 +1,16 @@ +[[release-highlights]] += {es} Release Highlights +++++ +Release Highlights +++++ + +[partintro] +-- +This section summarizes the most important changes in each release. For the +full list, see <> and <>. 
+ +* <> + +-- + +include::highlights-7.0.0.asciidoc[] \ No newline at end of file diff --git a/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java b/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java index a36df9987e7de..02bc304317e68 100644 --- a/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java +++ b/docs/src/test/java/org/elasticsearch/smoketest/DocsClientYamlTestSuiteIT.java @@ -86,9 +86,13 @@ protected boolean randomizeContentType() { } @Override - protected ClientYamlTestClient initClientYamlTestClient(ClientYamlSuiteRestSpec restSpec, RestClient restClient, - List hosts, Version esVersion) throws IOException { - return new ClientYamlDocsTestClient(restSpec, restClient, hosts, esVersion); + protected ClientYamlTestClient initClientYamlTestClient( + final ClientYamlSuiteRestSpec restSpec, + final RestClient restClient, + final List hosts, + final Version esVersion, + final Version masterVersion) throws IOException { + return new ClientYamlDocsTestClient(restSpec, restClient, hosts, esVersion, masterVersion); } /** diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesChannelContext.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesChannelContext.java index 000e871e92781..ef1e188a22e0a 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesChannelContext.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesChannelContext.java @@ -20,25 +20,13 @@ package org.elasticsearch.nio; import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.ClosedChannelException; -import java.util.LinkedList; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.BiConsumer; import java.util.function.Consumer; public class BytesChannelContext extends SocketChannelContext { - private final ReadConsumer readConsumer; - private final InboundChannelBuffer channelBuffer; - private final LinkedList queued = new LinkedList<>(); - private final AtomicBoolean isClosing = new AtomicBoolean(false); - public BytesChannelContext(NioSocketChannel channel, SocketSelector selector, Consumer exceptionHandler, - ReadConsumer readConsumer, InboundChannelBuffer channelBuffer) { - super(channel, selector, exceptionHandler); - this.readConsumer = readConsumer; - this.channelBuffer = channelBuffer; + ReadWriteHandler handler, InboundChannelBuffer channelBuffer) { + super(channel, selector, exceptionHandler, handler, channelBuffer); } @Override @@ -56,55 +44,30 @@ public int read() throws IOException { channelBuffer.incrementIndex(bytesRead); - int bytesConsumed = Integer.MAX_VALUE; - while (bytesConsumed > 0 && channelBuffer.getIndex() > 0) { - bytesConsumed = readConsumer.consumeReads(channelBuffer); - channelBuffer.release(bytesConsumed); - } + handleReadBytes(); return bytesRead; } - @Override - public void sendMessage(ByteBuffer[] buffers, BiConsumer listener) { - if (isClosing.get()) { - listener.accept(null, new ClosedChannelException()); - return; - } - - BytesWriteOperation writeOperation = new BytesWriteOperation(this, buffers, listener); - SocketSelector selector = getSelector(); - if (selector.isOnCurrentThread() == false) { - selector.queueWrite(writeOperation); - return; - } - - selector.queueWriteInChannelBuffer(writeOperation); - } - - @Override - public void queueWriteOperation(WriteOperation writeOperation) { - getSelector().assertOnSelectorThread(); - queued.add((BytesWriteOperation) writeOperation); 
- } - @Override public void flushChannel() throws IOException { getSelector().assertOnSelectorThread(); - int ops = queued.size(); - if (ops == 1) { - singleFlush(queued.pop()); - } else if (ops > 1) { - multiFlush(); + boolean lastOpCompleted = true; + FlushOperation flushOperation; + while (lastOpCompleted && (flushOperation = getPendingFlush()) != null) { + try { + if (singleFlush(flushOperation)) { + currentFlushOperationComplete(); + } else { + lastOpCompleted = false; + } + } catch (IOException e) { + currentFlushOperationFailed(e); + throw e; + } } } - @Override - public boolean hasQueuedWriteOps() { - getSelector().assertOnSelectorThread(); - return queued.isEmpty() == false; - } - @Override public void closeChannel() { if (isClosing.compareAndSet(false, true)) { @@ -117,51 +80,12 @@ public boolean selectorShouldClose() { return isPeerClosed() || hasIOException() || isClosing.get(); } - @Override - public void closeFromSelector() throws IOException { - getSelector().assertOnSelectorThread(); - if (channel.isOpen()) { - IOException channelCloseException = null; - try { - super.closeFromSelector(); - } catch (IOException e) { - channelCloseException = e; - } - // Set to true in order to reject new writes before queuing with selector - isClosing.set(true); - channelBuffer.close(); - for (BytesWriteOperation op : queued) { - getSelector().executeFailedListener(op.getListener(), new ClosedChannelException()); - } - queued.clear(); - if (channelCloseException != null) { - throw channelCloseException; - } - } - } - - private void singleFlush(BytesWriteOperation headOp) throws IOException { - try { - int written = flushToChannel(headOp.getBuffersToWrite()); - headOp.incrementIndex(written); - } catch (IOException e) { - getSelector().executeFailedListener(headOp.getListener(), e); - throw e; - } - - if (headOp.isFullyFlushed()) { - getSelector().executeListener(headOp.getListener(), null); - } else { - queued.push(headOp); - } - } - - private void multiFlush() throws IOException { - boolean lastOpCompleted = true; - while (lastOpCompleted && queued.isEmpty() == false) { - BytesWriteOperation op = queued.pop(); - singleFlush(op); - lastOpCompleted = op.isFullyFlushed(); - } + /** + * Returns a boolean indicating if the operation was fully flushed. + */ + private boolean singleFlush(FlushOperation flushOperation) throws IOException { + int written = flushToChannel(flushOperation.getBuffersToWrite()); + flushOperation.incrementIndex(written); + return flushOperation.isFullyFlushed(); } } diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteHandler.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteHandler.java new file mode 100644 index 0000000000000..ba379e2873210 --- /dev/null +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteHandler.java @@ -0,0 +1,47 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.nio; + +import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.List; +import java.util.function.BiConsumer; + +public abstract class BytesWriteHandler implements ReadWriteHandler { + + private static final List EMPTY_LIST = Collections.emptyList(); + + public WriteOperation createWriteOperation(SocketChannelContext context, Object message, BiConsumer listener) { + assert message instanceof ByteBuffer[] : "This channel only supports messages that are of type: " + ByteBuffer[].class + + ". Found type: " + message.getClass() + "."; + return new FlushReadyWrite(context, (ByteBuffer[]) message, listener); + } + + public List writeToBytes(WriteOperation writeOperation) { + assert writeOperation instanceof FlushReadyWrite : "Write operation must be flush ready"; + return Collections.singletonList((FlushReadyWrite) writeOperation); + } + + public List pollFlushOperations() { + return EMPTY_LIST; + } + + public void close() {} +} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteOperation.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/FlushOperation.java similarity index 86% rename from libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteOperation.java rename to libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/FlushOperation.java index 37c6e49727634..3102c972a6795 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/BytesWriteOperation.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/FlushOperation.java @@ -23,17 +23,15 @@ import java.util.Arrays; import java.util.function.BiConsumer; -public class BytesWriteOperation implements WriteOperation { +public class FlushOperation { - private final SocketChannelContext channelContext; private final BiConsumer listener; private final ByteBuffer[] buffers; private final int[] offsets; private final int length; private int internalIndex; - public BytesWriteOperation(SocketChannelContext channelContext, ByteBuffer[] buffers, BiConsumer listener) { - this.channelContext = channelContext; + public FlushOperation(ByteBuffer[] buffers, BiConsumer listener) { this.listener = listener; this.buffers = buffers; this.offsets = new int[buffers.length]; @@ -46,16 +44,10 @@ public BytesWriteOperation(SocketChannelContext channelContext, ByteBuffer[] buf length = offset; } - @Override public BiConsumer getListener() { return listener; } - @Override - public SocketChannelContext getChannel() { - return channelContext; - } - public boolean isFullyFlushed() { assert length >= internalIndex : "Should never have an index that is greater than the length [length=" + length + ", index=" + internalIndex + "]"; @@ -84,5 +76,4 @@ public ByteBuffer[] getBuffersToWrite() { return postIndexBuffers; } - } diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/FlushReadyWrite.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/FlushReadyWrite.java new file mode 100644 index 0000000000000..65bc8f17aaf4b --- /dev/null +++ 
b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/FlushReadyWrite.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.nio; + +import java.nio.ByteBuffer; +import java.util.function.BiConsumer; + +public class FlushReadyWrite extends FlushOperation implements WriteOperation { + + private final SocketChannelContext channelContext; + private final ByteBuffer[] buffers; + + FlushReadyWrite(SocketChannelContext channelContext, ByteBuffer[] buffers, BiConsumer listener) { + super(buffers, listener); + this.channelContext = channelContext; + this.buffers = buffers; + } + + @Override + public SocketChannelContext getChannel() { + return channelContext; + } + + @Override + public ByteBuffer[] getObject() { + return buffers; + } +} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ReadWriteHandler.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ReadWriteHandler.java new file mode 100644 index 0000000000000..f0637ea265280 --- /dev/null +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/ReadWriteHandler.java @@ -0,0 +1,71 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.nio; + +import java.io.IOException; +import java.util.List; +import java.util.function.BiConsumer; + +/** + * Implements the application specific logic for handling inbound and outbound messages for a channel. + */ +public interface ReadWriteHandler { + + /** + * This method is called when a message is queued with a channel. It can be called from any thread. 
+ * This method should validate that the message is a valid type and return a write operation object + * to be queued with the channel + * + * @param context the channel context + * @param message the message + * @param listener the listener to be called when the message is sent + * @return the write operation to be queued + */ + WriteOperation createWriteOperation(SocketChannelContext context, Object message, BiConsumer listener); + + /** + * This method is called on the event loop thread. It should serialize a write operation object to bytes + * that can be flushed to the raw nio channel. + * + * @param writeOperation to be converted to bytes + * @return the operations to flush the bytes to the channel + */ + List writeToBytes(WriteOperation writeOperation); + + /** + * Returns any flush operations that are ready to flush. This exists as a way to check if any flush + * operations were produced during a read call. + * + * @return flush operations + */ + List pollFlushOperations(); + + /** + * This method handles bytes that have been read from the network. It should return the number of bytes + * consumed so that they can be released. + * + * @param channelBuffer of bytes read from the network + * @return the number of bytes consumed + * @throws IOException if an exception occurs + */ + int consumeReads(InboundChannelBuffer channelBuffer) throws IOException; + + void close() throws IOException; +} diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java index 3bf47a98e0267..f2d299a9d328a 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketChannelContext.java @@ -19,10 +19,16 @@ package org.elasticsearch.nio; +import org.elasticsearch.nio.utils.ExceptionsHelper; + import java.io.IOException; import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; import java.nio.channels.SocketChannel; +import java.util.ArrayList; +import java.util.LinkedList; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiConsumer; import java.util.function.Consumer; @@ -33,21 +39,28 @@ * close behavior is required, it should be implemented in this context. * * The only methods of the context that should ever be called from a non-selector thread are - * {@link #closeChannel()} and {@link #sendMessage(ByteBuffer[], BiConsumer)}. + * {@link #closeChannel()} and {@link #sendMessage(Object, BiConsumer)}. 
*/ public abstract class SocketChannelContext extends ChannelContext { protected final NioSocketChannel channel; + protected final InboundChannelBuffer channelBuffer; + protected final AtomicBoolean isClosing = new AtomicBoolean(false); + private final ReadWriteHandler readWriteHandler; private final SocketSelector selector; private final CompletableFuture connectContext = new CompletableFuture<>(); + private final LinkedList pendingFlushes = new LinkedList<>(); private boolean ioException; private boolean peerClosed; private Exception connectException; - protected SocketChannelContext(NioSocketChannel channel, SocketSelector selector, Consumer exceptionHandler) { + protected SocketChannelContext(NioSocketChannel channel, SocketSelector selector, Consumer exceptionHandler, + ReadWriteHandler readWriteHandler, InboundChannelBuffer channelBuffer) { super(channel.getRawChannel(), exceptionHandler); this.selector = selector; this.channel = channel; + this.readWriteHandler = readWriteHandler; + this.channelBuffer = channelBuffer; } @Override @@ -108,15 +121,94 @@ public boolean connect() throws IOException { return isConnected; } - public abstract int read() throws IOException; + public void sendMessage(Object message, BiConsumer listener) { + if (isClosing.get()) { + listener.accept(null, new ClosedChannelException()); + return; + } - public abstract void sendMessage(ByteBuffer[] buffers, BiConsumer listener); + WriteOperation writeOperation = readWriteHandler.createWriteOperation(this, message, listener); + + SocketSelector selector = getSelector(); + if (selector.isOnCurrentThread() == false) { + selector.queueWrite(writeOperation); + return; + } + + selector.queueWriteInChannelBuffer(writeOperation); + } + + public void queueWriteOperation(WriteOperation writeOperation) { + getSelector().assertOnSelectorThread(); + pendingFlushes.addAll(readWriteHandler.writeToBytes(writeOperation)); + } - public abstract void queueWriteOperation(WriteOperation writeOperation); + public abstract int read() throws IOException; public abstract void flushChannel() throws IOException; - public abstract boolean hasQueuedWriteOps(); + protected void currentFlushOperationFailed(IOException e) { + FlushOperation flushOperation = pendingFlushes.pollFirst(); + getSelector().executeFailedListener(flushOperation.getListener(), e); + } + + protected void currentFlushOperationComplete() { + FlushOperation flushOperation = pendingFlushes.pollFirst(); + getSelector().executeListener(flushOperation.getListener(), null); + } + + protected FlushOperation getPendingFlush() { + return pendingFlushes.peekFirst(); + } + + @Override + public void closeFromSelector() throws IOException { + getSelector().assertOnSelectorThread(); + if (channel.isOpen()) { + ArrayList closingExceptions = new ArrayList<>(3); + try { + super.closeFromSelector(); + } catch (IOException e) { + closingExceptions.add(e); + } + // Set to true in order to reject new writes before queuing with selector + isClosing.set(true); + + // Poll for new flush operations to close + pendingFlushes.addAll(readWriteHandler.pollFlushOperations()); + FlushOperation flushOperation; + while ((flushOperation = pendingFlushes.pollFirst()) != null) { + selector.executeFailedListener(flushOperation.getListener(), new ClosedChannelException()); + } + + try { + readWriteHandler.close(); + } catch (IOException e) { + closingExceptions.add(e); + } + channelBuffer.close(); + + if (closingExceptions.isEmpty() == false) { + ExceptionsHelper.rethrowAndSuppress(closingExceptions); + } + } 
+ } + + protected void handleReadBytes() throws IOException { + int bytesConsumed = Integer.MAX_VALUE; + while (bytesConsumed > 0 && channelBuffer.getIndex() > 0) { + bytesConsumed = readWriteHandler.consumeReads(channelBuffer); + channelBuffer.release(bytesConsumed); + } + + // Some protocols might produce messages to flush during a read operation. + pendingFlushes.addAll(readWriteHandler.pollFlushOperations()); + } + + public boolean readyForFlush() { + getSelector().assertOnSelectorThread(); + return pendingFlushes.isEmpty() == false; + } /** * This method indicates if a selector should close this channel. @@ -178,9 +270,4 @@ protected int flushToChannel(ByteBuffer[] buffers) throws IOException { throw e; } } - - @FunctionalInterface - public interface ReadConsumer { - int consumeReads(InboundChannelBuffer channelBuffer) throws IOException; - } } diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketEventHandler.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketEventHandler.java index b1f738647619b..cacee47e96196 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketEventHandler.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/SocketEventHandler.java @@ -48,7 +48,7 @@ protected void handleRegistration(SocketChannelContext context) throws IOExcepti context.register(); SelectionKey selectionKey = context.getSelectionKey(); selectionKey.attach(context); - if (context.hasQueuedWriteOps()) { + if (context.readyForFlush()) { SelectionKeyUtils.setConnectReadAndWriteInterested(selectionKey); } else { SelectionKeyUtils.setConnectAndReadInterested(selectionKey); @@ -150,7 +150,7 @@ protected void postHandling(SocketChannelContext context) { } else { SelectionKey selectionKey = context.getSelectionKey(); boolean currentlyWriteInterested = SelectionKeyUtils.isWriteInterested(selectionKey); - boolean pendingWrites = context.hasQueuedWriteOps(); + boolean pendingWrites = context.readyForFlush(); if (currentlyWriteInterested == false && pendingWrites) { SelectionKeyUtils.setWriteInterested(selectionKey); } else if (currentlyWriteInterested && pendingWrites == false) { diff --git a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteOperation.java b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteOperation.java index 665b9f7759e11..25de6ab7326f3 100644 --- a/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteOperation.java +++ b/libs/elasticsearch-nio/src/main/java/org/elasticsearch/nio/WriteOperation.java @@ -16,7 +16,6 @@ * specific language governing permissions and limitations * under the License. */ - package org.elasticsearch.nio; import java.util.function.BiConsumer; @@ -24,11 +23,14 @@ /** * This is a basic write operation that can be queued with a channel. The only requirements of a write * operation is that is has a listener and a reference to its channel. The actual conversion of the write - * operation implementation to bytes will be performed by the {@link SocketChannelContext}. + * operation implementation to bytes will be performed by the {@link ReadWriteHandler}. 
*/ public interface WriteOperation { BiConsumer getListener(); SocketChannelContext getChannel(); + + Object getObject(); + } diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesChannelContextTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesChannelContextTests.java index d9de0ab1361c3..addfcdedbf99f 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesChannelContextTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesChannelContextTests.java @@ -19,23 +19,19 @@ package org.elasticsearch.nio; +import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.test.ESTestCase; import org.junit.Before; -import org.mockito.ArgumentCaptor; import java.io.IOException; import java.nio.ByteBuffer; -import java.nio.channels.ClosedChannelException; import java.nio.channels.SocketChannel; import java.util.function.BiConsumer; import java.util.function.Consumer; -import java.util.function.Supplier; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyInt; -import static org.mockito.Matchers.isNull; -import static org.mockito.Matchers.same; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -43,20 +39,19 @@ public class BytesChannelContextTests extends ESTestCase { - private SocketChannelContext.ReadConsumer readConsumer; + private CheckedFunction readConsumer; private NioSocketChannel channel; private SocketChannel rawChannel; private BytesChannelContext context; private InboundChannelBuffer channelBuffer; private SocketSelector selector; - private Consumer exceptionHandler; private BiConsumer listener; private int messageLength; @Before @SuppressWarnings("unchecked") public void init() { - readConsumer = mock(SocketChannelContext.ReadConsumer.class); + readConsumer = mock(CheckedFunction.class); messageLength = randomInt(96) + 20; selector = mock(SocketSelector.class); @@ -64,9 +59,9 @@ public void init() { channel = mock(NioSocketChannel.class); rawChannel = mock(SocketChannel.class); channelBuffer = InboundChannelBuffer.allocatingInstance(); - exceptionHandler = mock(Consumer.class); + TestReadWriteHandler handler = new TestReadWriteHandler(readConsumer); when(channel.getRawChannel()).thenReturn(rawChannel); - context = new BytesChannelContext(channel, selector, exceptionHandler, readConsumer, channelBuffer); + context = new BytesChannelContext(channel, selector, mock(Consumer.class), handler, channelBuffer); when(selector.isOnCurrentThread()).thenReturn(true); } @@ -80,13 +75,13 @@ public void testSuccessfulRead() throws IOException { return bytes.length; }); - when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength, 0); + when(readConsumer.apply(channelBuffer)).thenReturn(messageLength, 0); assertEquals(messageLength, context.read()); assertEquals(0, channelBuffer.getIndex()); assertEquals(BigArrays.BYTE_PAGE_SIZE - bytes.length, channelBuffer.getCapacity()); - verify(readConsumer, times(1)).consumeReads(channelBuffer); + verify(readConsumer, times(1)).apply(channelBuffer); } public void testMultipleReadsConsumed() throws IOException { @@ -98,13 +93,13 @@ public void testMultipleReadsConsumed() throws IOException { return bytes.length; }); - when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength, messageLength, 0); + when(readConsumer.apply(channelBuffer)).thenReturn(messageLength, messageLength, 0); 
assertEquals(bytes.length, context.read()); assertEquals(0, channelBuffer.getIndex()); assertEquals(BigArrays.BYTE_PAGE_SIZE - bytes.length, channelBuffer.getCapacity()); - verify(readConsumer, times(2)).consumeReads(channelBuffer); + verify(readConsumer, times(2)).apply(channelBuffer); } public void testPartialRead() throws IOException { @@ -117,20 +112,20 @@ public void testPartialRead() throws IOException { }); - when(readConsumer.consumeReads(channelBuffer)).thenReturn(0); + when(readConsumer.apply(channelBuffer)).thenReturn(0); assertEquals(messageLength, context.read()); assertEquals(bytes.length, channelBuffer.getIndex()); - verify(readConsumer, times(1)).consumeReads(channelBuffer); + verify(readConsumer, times(1)).apply(channelBuffer); - when(readConsumer.consumeReads(channelBuffer)).thenReturn(messageLength * 2, 0); + when(readConsumer.apply(channelBuffer)).thenReturn(messageLength * 2, 0); assertEquals(messageLength, context.read()); assertEquals(0, channelBuffer.getIndex()); assertEquals(BigArrays.BYTE_PAGE_SIZE - (bytes.length * 2), channelBuffer.getCapacity()); - verify(readConsumer, times(2)).consumeReads(channelBuffer); + verify(readConsumer, times(2)).apply(channelBuffer); } public void testReadThrowsIOException() throws IOException { @@ -157,186 +152,100 @@ public void testReadLessThanZeroMeansReadyForClose() throws IOException { assertTrue(context.selectorShouldClose()); } - @SuppressWarnings("unchecked") - public void testCloseClosesChannelBuffer() throws IOException { - try (SocketChannel realChannel = SocketChannel.open()) { - when(channel.getRawChannel()).thenReturn(realChannel); - context = new BytesChannelContext(channel, selector, exceptionHandler, readConsumer, channelBuffer); - - when(channel.isOpen()).thenReturn(true); - Runnable closer = mock(Runnable.class); - Supplier pageSupplier = () -> new InboundChannelBuffer.Page(ByteBuffer.allocate(1 << 14), closer); - InboundChannelBuffer buffer = new InboundChannelBuffer(pageSupplier); - buffer.ensureCapacity(1); - BytesChannelContext context = new BytesChannelContext(channel, selector, exceptionHandler, readConsumer, buffer); - context.closeFromSelector(); - verify(closer).run(); - } - } - - public void testWriteFailsIfClosing() { - context.closeChannel(); - - ByteBuffer[] buffers = {ByteBuffer.wrap(createMessage(10))}; - context.sendMessage(buffers, listener); - - verify(listener).accept(isNull(Void.class), any(ClosedChannelException.class)); - } - - public void testSendMessageFromDifferentThreadIsQueuedWithSelector() throws Exception { - ArgumentCaptor writeOpCaptor = ArgumentCaptor.forClass(BytesWriteOperation.class); - - when(selector.isOnCurrentThread()).thenReturn(false); - - ByteBuffer[] buffers = {ByteBuffer.wrap(createMessage(10))}; - context.sendMessage(buffers, listener); - - verify(selector).queueWrite(writeOpCaptor.capture()); - BytesWriteOperation writeOp = writeOpCaptor.getValue(); - - assertSame(listener, writeOp.getListener()); - assertSame(context, writeOp.getChannel()); - assertEquals(buffers[0], writeOp.getBuffersToWrite()[0]); - } - - public void testSendMessageFromSameThreadIsQueuedInChannel() { - ArgumentCaptor writeOpCaptor = ArgumentCaptor.forClass(BytesWriteOperation.class); - - ByteBuffer[] buffers = {ByteBuffer.wrap(createMessage(10))}; - context.sendMessage(buffers, listener); - - verify(selector).queueWriteInChannelBuffer(writeOpCaptor.capture()); - BytesWriteOperation writeOp = writeOpCaptor.getValue(); - - assertSame(listener, writeOp.getListener()); - assertSame(context, 
writeOp.getChannel()); - assertEquals(buffers[0], writeOp.getBuffersToWrite()[0]); - } - - public void testWriteIsQueuedInChannel() { - assertFalse(context.hasQueuedWriteOps()); - - ByteBuffer[] buffer = {ByteBuffer.allocate(10)}; - context.queueWriteOperation(new BytesWriteOperation(context, buffer, listener)); - - assertTrue(context.hasQueuedWriteOps()); - } - - @SuppressWarnings("unchecked") - public void testWriteOpsClearedOnClose() throws Exception { - try (SocketChannel realChannel = SocketChannel.open()) { - when(channel.getRawChannel()).thenReturn(realChannel); - context = new BytesChannelContext(channel, selector, exceptionHandler, readConsumer, channelBuffer); - - assertFalse(context.hasQueuedWriteOps()); - - ByteBuffer[] buffer = {ByteBuffer.allocate(10)}; - context.queueWriteOperation(new BytesWriteOperation(context, buffer, listener)); - - assertTrue(context.hasQueuedWriteOps()); - - when(channel.isOpen()).thenReturn(true); - context.closeFromSelector(); - - verify(selector).executeFailedListener(same(listener), any(ClosedChannelException.class)); - - assertFalse(context.hasQueuedWriteOps()); - } - } - + @SuppressWarnings("varargs") public void testQueuedWriteIsFlushedInFlushCall() throws Exception { - assertFalse(context.hasQueuedWriteOps()); + assertFalse(context.readyForFlush()); ByteBuffer[] buffers = {ByteBuffer.allocate(10)}; - BytesWriteOperation writeOperation = mock(BytesWriteOperation.class); - context.queueWriteOperation(writeOperation); - assertTrue(context.hasQueuedWriteOps()); + FlushReadyWrite flushOperation = mock(FlushReadyWrite.class); + context.queueWriteOperation(flushOperation); + + assertTrue(context.readyForFlush()); - when(writeOperation.getBuffersToWrite()).thenReturn(buffers); - when(writeOperation.isFullyFlushed()).thenReturn(true); - when(writeOperation.getListener()).thenReturn(listener); + when(flushOperation.getBuffersToWrite()).thenReturn(buffers); + when(flushOperation.isFullyFlushed()).thenReturn(true); + when(flushOperation.getListener()).thenReturn(listener); context.flushChannel(); verify(rawChannel).write(buffers, 0, buffers.length); verify(selector).executeListener(listener, null); - assertFalse(context.hasQueuedWriteOps()); + assertFalse(context.readyForFlush()); } public void testPartialFlush() throws IOException { - assertFalse(context.hasQueuedWriteOps()); - - BytesWriteOperation writeOperation = mock(BytesWriteOperation.class); - context.queueWriteOperation(writeOperation); + assertFalse(context.readyForFlush()); + FlushReadyWrite flushOperation = mock(FlushReadyWrite.class); + context.queueWriteOperation(flushOperation); + assertTrue(context.readyForFlush()); - assertTrue(context.hasQueuedWriteOps()); - - when(writeOperation.isFullyFlushed()).thenReturn(false); - when(writeOperation.getBuffersToWrite()).thenReturn(new ByteBuffer[0]); + when(flushOperation.isFullyFlushed()).thenReturn(false); + when(flushOperation.getBuffersToWrite()).thenReturn(new ByteBuffer[0]); context.flushChannel(); verify(listener, times(0)).accept(null, null); - assertTrue(context.hasQueuedWriteOps()); + assertTrue(context.readyForFlush()); } @SuppressWarnings("unchecked") public void testMultipleWritesPartialFlushes() throws IOException { - assertFalse(context.hasQueuedWriteOps()); + assertFalse(context.readyForFlush()); BiConsumer listener2 = mock(BiConsumer.class); - BytesWriteOperation writeOperation1 = mock(BytesWriteOperation.class); - BytesWriteOperation writeOperation2 = mock(BytesWriteOperation.class); - 
when(writeOperation1.getBuffersToWrite()).thenReturn(new ByteBuffer[0]); - when(writeOperation2.getBuffersToWrite()).thenReturn(new ByteBuffer[0]); - when(writeOperation1.getListener()).thenReturn(listener); - when(writeOperation2.getListener()).thenReturn(listener2); - context.queueWriteOperation(writeOperation1); - context.queueWriteOperation(writeOperation2); - - assertTrue(context.hasQueuedWriteOps()); - - when(writeOperation1.isFullyFlushed()).thenReturn(true); - when(writeOperation2.isFullyFlushed()).thenReturn(false); + FlushReadyWrite flushOperation1 = mock(FlushReadyWrite.class); + FlushReadyWrite flushOperation2 = mock(FlushReadyWrite.class); + when(flushOperation1.getBuffersToWrite()).thenReturn(new ByteBuffer[0]); + when(flushOperation2.getBuffersToWrite()).thenReturn(new ByteBuffer[0]); + when(flushOperation1.getListener()).thenReturn(listener); + when(flushOperation2.getListener()).thenReturn(listener2); + + context.queueWriteOperation(flushOperation1); + context.queueWriteOperation(flushOperation2); + + assertTrue(context.readyForFlush()); + + when(flushOperation1.isFullyFlushed()).thenReturn(true); + when(flushOperation2.isFullyFlushed()).thenReturn(false); context.flushChannel(); verify(selector).executeListener(listener, null); verify(listener2, times(0)).accept(null, null); - assertTrue(context.hasQueuedWriteOps()); + assertTrue(context.readyForFlush()); - when(writeOperation2.isFullyFlushed()).thenReturn(true); + when(flushOperation2.isFullyFlushed()).thenReturn(true); context.flushChannel(); verify(selector).executeListener(listener2, null); - assertFalse(context.hasQueuedWriteOps()); + assertFalse(context.readyForFlush()); } public void testWhenIOExceptionThrownListenerIsCalled() throws IOException { - assertFalse(context.hasQueuedWriteOps()); + assertFalse(context.readyForFlush()); ByteBuffer[] buffers = {ByteBuffer.allocate(10)}; - BytesWriteOperation writeOperation = mock(BytesWriteOperation.class); - context.queueWriteOperation(writeOperation); + FlushReadyWrite flushOperation = mock(FlushReadyWrite.class); + context.queueWriteOperation(flushOperation); - assertTrue(context.hasQueuedWriteOps()); + assertTrue(context.readyForFlush()); IOException exception = new IOException(); - when(writeOperation.getBuffersToWrite()).thenReturn(buffers); + when(flushOperation.getBuffersToWrite()).thenReturn(buffers); when(rawChannel.write(buffers, 0, buffers.length)).thenThrow(exception); - when(writeOperation.getListener()).thenReturn(listener); + when(flushOperation.getListener()).thenReturn(listener); expectThrows(IOException.class, () -> context.flushChannel()); verify(selector).executeFailedListener(listener, exception); - assertFalse(context.hasQueuedWriteOps()); + assertFalse(context.readyForFlush()); } public void testWriteIOExceptionMeansChannelReadyToClose() throws IOException { ByteBuffer[] buffers = {ByteBuffer.allocate(10)}; - BytesWriteOperation writeOperation = mock(BytesWriteOperation.class); - context.queueWriteOperation(writeOperation); + FlushReadyWrite flushOperation = mock(FlushReadyWrite.class); + context.queueWriteOperation(flushOperation); IOException exception = new IOException(); - when(writeOperation.getBuffersToWrite()).thenReturn(buffers); + when(flushOperation.getBuffersToWrite()).thenReturn(buffers); when(rawChannel.write(buffers, 0, buffers.length)).thenThrow(exception); assertFalse(context.selectorShouldClose()); @@ -344,7 +253,7 @@ public void testWriteIOExceptionMeansChannelReadyToClose() throws IOException { 
assertTrue(context.selectorShouldClose()); } - public void initiateCloseSchedulesCloseWithSelector() { + public void testInitiateCloseSchedulesCloseWithSelector() { context.closeChannel(); verify(selector).queueChannelClose(channel); } @@ -356,4 +265,18 @@ private static byte[] createMessage(int length) { } return bytes; } + + private static class TestReadWriteHandler extends BytesWriteHandler { + + private final CheckedFunction fn; + + private TestReadWriteHandler(CheckedFunction fn) { + this.fn = fn; + } + + @Override + public int consumeReads(InboundChannelBuffer channelBuffer) throws IOException { + return fn.apply(channelBuffer); + } + } } diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesWriteOperationTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/FlushOperationTests.java similarity index 87% rename from libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesWriteOperationTests.java rename to libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/FlushOperationTests.java index 05afc80a49086..a244de51f3591 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/BytesWriteOperationTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/FlushOperationTests.java @@ -29,22 +29,19 @@ import static org.mockito.Mockito.mock; -public class BytesWriteOperationTests extends ESTestCase { +public class FlushOperationTests extends ESTestCase { - private SocketChannelContext channelContext; private BiConsumer listener; @Before @SuppressWarnings("unchecked") public void setFields() { - channelContext = mock(SocketChannelContext.class); listener = mock(BiConsumer.class); - } public void testFullyFlushedMarker() { ByteBuffer[] buffers = {ByteBuffer.allocate(10)}; - BytesWriteOperation writeOp = new BytesWriteOperation(channelContext, buffers, listener); + FlushOperation writeOp = new FlushOperation(buffers, listener); writeOp.incrementIndex(10); @@ -53,7 +50,7 @@ public void testFullyFlushedMarker() { public void testPartiallyFlushedMarker() { ByteBuffer[] buffers = {ByteBuffer.allocate(10)}; - BytesWriteOperation writeOp = new BytesWriteOperation(channelContext, buffers, listener); + FlushOperation writeOp = new FlushOperation(buffers, listener); writeOp.incrementIndex(5); @@ -62,7 +59,7 @@ public void testPartiallyFlushedMarker() { public void testMultipleFlushesWithCompositeBuffer() throws IOException { ByteBuffer[] buffers = {ByteBuffer.allocate(10), ByteBuffer.allocate(15), ByteBuffer.allocate(3)}; - BytesWriteOperation writeOp = new BytesWriteOperation(channelContext, buffers, listener); + FlushOperation writeOp = new FlushOperation(buffers, listener); ArgumentCaptor buffersCaptor = ArgumentCaptor.forClass(ByteBuffer[].class); diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketChannelContextTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketChannelContextTests.java index 17e6b7acba283..d6787f7cc1534 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketChannelContextTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketChannelContextTests.java @@ -21,18 +21,27 @@ import org.elasticsearch.test.ESTestCase; import org.junit.Before; +import org.mockito.ArgumentCaptor; import java.io.IOException; import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; import java.nio.channels.SocketChannel; +import java.util.Arrays; +import java.util.Collections; import 
java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; import java.util.function.Consumer; +import java.util.function.Supplier; import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyInt; +import static org.mockito.Matchers.isNull; +import static org.mockito.Matchers.same; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class SocketChannelContextTests extends ESTestCase { @@ -41,6 +50,9 @@ public class SocketChannelContextTests extends ESTestCase { private TestSocketChannelContext context; private Consumer exceptionHandler; private NioSocketChannel channel; + private BiConsumer listener; + private SocketSelector selector; + private ReadWriteHandler readWriteHandler; @SuppressWarnings("unchecked") @Before @@ -49,9 +61,15 @@ public void setup() throws Exception { rawChannel = mock(SocketChannel.class); channel = mock(NioSocketChannel.class); + listener = mock(BiConsumer.class); when(channel.getRawChannel()).thenReturn(rawChannel); exceptionHandler = mock(Consumer.class); - context = new TestSocketChannelContext(channel, mock(SocketSelector.class), exceptionHandler); + selector = mock(SocketSelector.class); + readWriteHandler = mock(ReadWriteHandler.class); + InboundChannelBuffer channelBuffer = InboundChannelBuffer.allocatingInstance(); + context = new TestSocketChannelContext(channel, selector, exceptionHandler, readWriteHandler, channelBuffer); + + when(selector.isOnCurrentThread()).thenReturn(true); } public void testIOExceptionSetIfEncountered() throws IOException { @@ -119,10 +137,147 @@ public void testConnectFails() throws IOException { assertSame(ioException, exception.get()); } + public void testWriteFailsIfClosing() { + context.closeChannel(); + + ByteBuffer[] buffers = {ByteBuffer.wrap(createMessage(10))}; + context.sendMessage(buffers, listener); + + verify(listener).accept(isNull(Void.class), any(ClosedChannelException.class)); + } + + public void testSendMessageFromDifferentThreadIsQueuedWithSelector() throws Exception { + ArgumentCaptor writeOpCaptor = ArgumentCaptor.forClass(WriteOperation.class); + + when(selector.isOnCurrentThread()).thenReturn(false); + + ByteBuffer[] buffers = {ByteBuffer.wrap(createMessage(10))}; + WriteOperation writeOperation = mock(WriteOperation.class); + when(readWriteHandler.createWriteOperation(context, buffers, listener)).thenReturn(writeOperation); + context.sendMessage(buffers, listener); + + verify(selector).queueWrite(writeOpCaptor.capture()); + WriteOperation writeOp = writeOpCaptor.getValue(); + + assertSame(writeOperation, writeOp); + } + + public void testSendMessageFromSameThreadIsQueuedInChannel() { + ArgumentCaptor writeOpCaptor = ArgumentCaptor.forClass(WriteOperation.class); + + ByteBuffer[] buffers = {ByteBuffer.wrap(createMessage(10))}; + WriteOperation writeOperation = mock(WriteOperation.class); + when(readWriteHandler.createWriteOperation(context, buffers, listener)).thenReturn(writeOperation); + context.sendMessage(buffers, listener); + + verify(selector).queueWriteInChannelBuffer(writeOpCaptor.capture()); + WriteOperation writeOp = writeOpCaptor.getValue(); + + assertSame(writeOperation, writeOp); + } + + public void testWriteIsQueuedInChannel() { + assertFalse(context.readyForFlush()); + + ByteBuffer[] buffer = {ByteBuffer.allocate(10)}; + FlushReadyWrite writeOperation = new FlushReadyWrite(context, 
buffer, listener); + when(readWriteHandler.writeToBytes(writeOperation)).thenReturn(Collections.singletonList(writeOperation)); + context.queueWriteOperation(writeOperation); + + verify(readWriteHandler).writeToBytes(writeOperation); + assertTrue(context.readyForFlush()); + } + + public void testHandleReadBytesWillCheckForNewFlushOperations() throws IOException { + assertFalse(context.readyForFlush()); + when(readWriteHandler.pollFlushOperations()).thenReturn(Collections.singletonList(mock(FlushOperation.class))); + context.handleReadBytes(); + assertTrue(context.readyForFlush()); + } + + @SuppressWarnings({"unchecked", "varargs"}) + public void testFlushOpsClearedOnClose() throws Exception { + try (SocketChannel realChannel = SocketChannel.open()) { + when(channel.getRawChannel()).thenReturn(realChannel); + InboundChannelBuffer channelBuffer = InboundChannelBuffer.allocatingInstance(); + context = new TestSocketChannelContext(channel, selector, exceptionHandler, readWriteHandler, channelBuffer); + + assertFalse(context.readyForFlush()); + + ByteBuffer[] buffer = {ByteBuffer.allocate(10)}; + WriteOperation writeOperation = mock(WriteOperation.class); + BiConsumer listener2 = mock(BiConsumer.class); + when(readWriteHandler.writeToBytes(writeOperation)).thenReturn(Arrays.asList(new FlushOperation(buffer, listener), + new FlushOperation(buffer, listener2))); + context.queueWriteOperation(writeOperation); + + assertTrue(context.readyForFlush()); + + when(channel.isOpen()).thenReturn(true); + context.closeFromSelector(); + + verify(selector, times(1)).executeFailedListener(same(listener), any(ClosedChannelException.class)); + verify(selector, times(1)).executeFailedListener(same(listener2), any(ClosedChannelException.class)); + + assertFalse(context.readyForFlush()); + } + } + + @SuppressWarnings({"unchecked", "varargs"}) + public void testWillPollForFlushOpsToClose() throws Exception { + try (SocketChannel realChannel = SocketChannel.open()) { + when(channel.getRawChannel()).thenReturn(realChannel); + InboundChannelBuffer channelBuffer = InboundChannelBuffer.allocatingInstance(); + context = new TestSocketChannelContext(channel, selector, exceptionHandler, readWriteHandler, channelBuffer); + + + ByteBuffer[] buffer = {ByteBuffer.allocate(10)}; + BiConsumer listener2 = mock(BiConsumer.class); + + assertFalse(context.readyForFlush()); + when(channel.isOpen()).thenReturn(true); + when(readWriteHandler.pollFlushOperations()).thenReturn(Arrays.asList(new FlushOperation(buffer, listener), + new FlushOperation(buffer, listener2))); + context.closeFromSelector(); + + verify(selector, times(1)).executeFailedListener(same(listener), any(ClosedChannelException.class)); + verify(selector, times(1)).executeFailedListener(same(listener2), any(ClosedChannelException.class)); + + assertFalse(context.readyForFlush()); + } + } + + public void testCloseClosesWriteProducer() throws IOException { + try (SocketChannel realChannel = SocketChannel.open()) { + when(channel.getRawChannel()).thenReturn(realChannel); + when(channel.isOpen()).thenReturn(true); + InboundChannelBuffer buffer = InboundChannelBuffer.allocatingInstance(); + BytesChannelContext context = new BytesChannelContext(channel, selector, exceptionHandler, readWriteHandler, buffer); + context.closeFromSelector(); + verify(readWriteHandler).close(); + } + } + + @SuppressWarnings("unchecked") + public void testCloseClosesChannelBuffer() throws IOException { + try (SocketChannel realChannel = SocketChannel.open()) { + 
when(channel.getRawChannel()).thenReturn(realChannel); + when(channel.isOpen()).thenReturn(true); + Runnable closer = mock(Runnable.class); + Supplier pageSupplier = () -> new InboundChannelBuffer.Page(ByteBuffer.allocate(1 << 14), closer); + InboundChannelBuffer buffer = new InboundChannelBuffer(pageSupplier); + buffer.ensureCapacity(1); + TestSocketChannelContext context = new TestSocketChannelContext(channel, selector, exceptionHandler, readWriteHandler, buffer); + context.closeFromSelector(); + verify(closer).run(); + } + } + private static class TestSocketChannelContext extends SocketChannelContext { - private TestSocketChannelContext(NioSocketChannel channel, SocketSelector selector, Consumer exceptionHandler) { - super(channel, selector, exceptionHandler); + private TestSocketChannelContext(NioSocketChannel channel, SocketSelector selector, Consumer exceptionHandler, + ReadWriteHandler readWriteHandler, InboundChannelBuffer channelBuffer) { + super(channel, selector, exceptionHandler, readWriteHandler, channelBuffer); } @Override @@ -135,16 +290,6 @@ public int read() throws IOException { } } - @Override - public void sendMessage(ByteBuffer[] buffers, BiConsumer listener) { - - } - - @Override - public void queueWriteOperation(WriteOperation writeOperation) { - - } - @Override public void flushChannel() throws IOException { if (randomBoolean()) { @@ -155,11 +300,6 @@ public void flushChannel() throws IOException { } } - @Override - public boolean hasQueuedWriteOps() { - return false; - } - @Override public boolean selectorShouldClose() { return false; @@ -167,7 +307,15 @@ public boolean selectorShouldClose() { @Override public void closeChannel() { + isClosing.set(true); + } + } + private static byte[] createMessage(int length) { + byte[] bytes = new byte[length]; + for (int i = 0; i < length; ++i) { + bytes[i] = randomByte(); } + return bytes; } } diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java index 4f476c1ff6b22..a80563f7d74db 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketEventHandlerTests.java @@ -26,6 +26,7 @@ import java.nio.channels.CancelledKeyException; import java.nio.channels.SelectionKey; import java.nio.channels.SocketChannel; +import java.util.Collections; import java.util.function.Consumer; import static org.mockito.Mockito.mock; @@ -37,6 +38,7 @@ public class SocketEventHandlerTests extends ESTestCase { private Consumer exceptionHandler; + private ReadWriteHandler readWriteHandler; private SocketEventHandler handler; private NioSocketChannel channel; private SocketChannel rawChannel; @@ -46,13 +48,14 @@ public class SocketEventHandlerTests extends ESTestCase { @SuppressWarnings("unchecked") public void setUpHandler() throws IOException { exceptionHandler = mock(Consumer.class); + readWriteHandler = mock(ReadWriteHandler.class); SocketSelector selector = mock(SocketSelector.class); handler = new SocketEventHandler(logger); rawChannel = mock(SocketChannel.class); channel = new NioSocketChannel(rawChannel); when(rawChannel.finishConnect()).thenReturn(true); - context = new DoNotRegisterContext(channel, selector, exceptionHandler, new TestSelectionKey(0)); + context = new DoNotRegisterContext(channel, selector, exceptionHandler, new TestSelectionKey(0), readWriteHandler); channel.setContext(context); 
handler.handleRegistration(context); @@ -83,7 +86,9 @@ public void testRegisterAddsAttachment() throws IOException { } public void testRegisterWithPendingWritesAddsOP_CONNECTAndOP_READAndOP_WRITEInterest() throws IOException { - channel.getContext().queueWriteOperation(mock(BytesWriteOperation.class)); + FlushReadyWrite flushReadyWrite = mock(FlushReadyWrite.class); + when(readWriteHandler.writeToBytes(flushReadyWrite)).thenReturn(Collections.singletonList(flushReadyWrite)); + channel.getContext().queueWriteOperation(flushReadyWrite); handler.handleRegistration(context); assertEquals(SelectionKey.OP_READ | SelectionKey.OP_CONNECT | SelectionKey.OP_WRITE, context.getSelectionKey().interestOps()); } @@ -162,7 +167,7 @@ public void testPostHandlingWillAddWriteIfNecessary() throws IOException { TestSelectionKey selectionKey = new TestSelectionKey(SelectionKey.OP_READ); SocketChannelContext context = mock(SocketChannelContext.class); when(context.getSelectionKey()).thenReturn(selectionKey); - when(context.hasQueuedWriteOps()).thenReturn(true); + when(context.readyForFlush()).thenReturn(true); NioSocketChannel channel = mock(NioSocketChannel.class); when(channel.getContext()).thenReturn(context); @@ -176,7 +181,7 @@ public void testPostHandlingWillRemoveWriteIfNecessary() throws IOException { TestSelectionKey key = new TestSelectionKey(SelectionKey.OP_READ | SelectionKey.OP_WRITE); SocketChannelContext context = mock(SocketChannelContext.class); when(context.getSelectionKey()).thenReturn(key); - when(context.hasQueuedWriteOps()).thenReturn(false); + when(context.readyForFlush()).thenReturn(false); NioSocketChannel channel = mock(NioSocketChannel.class); when(channel.getContext()).thenReturn(context); @@ -192,8 +197,8 @@ private class DoNotRegisterContext extends BytesChannelContext { private final TestSelectionKey selectionKey; DoNotRegisterContext(NioSocketChannel channel, SocketSelector selector, Consumer exceptionHandler, - TestSelectionKey selectionKey) { - super(channel, selector, exceptionHandler, mock(ReadConsumer.class), InboundChannelBuffer.allocatingInstance()); + TestSelectionKey selectionKey, ReadWriteHandler handler) { + super(channel, selector, exceptionHandler, handler, InboundChannelBuffer.allocatingInstance()); this.selectionKey = selectionKey; } diff --git a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketSelectorTests.java b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketSelectorTests.java index 223f14455f96d..a68f5c05dad5a 100644 --- a/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketSelectorTests.java +++ b/libs/elasticsearch-nio/src/test/java/org/elasticsearch/nio/SocketSelectorTests.java @@ -117,13 +117,13 @@ public void testSuccessfullyRegisterChannelWillAttemptConnect() throws Exception public void testQueueWriteWhenNotRunning() throws Exception { socketSelector.close(); - socketSelector.queueWrite(new BytesWriteOperation(channelContext, buffers, listener)); + socketSelector.queueWrite(new FlushReadyWrite(channelContext, buffers, listener)); verify(listener).accept(isNull(Void.class), any(ClosedSelectorException.class)); } public void testQueueWriteChannelIsClosed() throws Exception { - BytesWriteOperation writeOperation = new BytesWriteOperation(channelContext, buffers, listener); + WriteOperation writeOperation = new FlushReadyWrite(channelContext, buffers, listener); socketSelector.queueWrite(writeOperation); when(channelContext.isOpen()).thenReturn(false); @@ -136,7 +136,7 @@ public void 
testQueueWriteChannelIsClosed() throws Exception { public void testQueueWriteSelectionKeyThrowsException() throws Exception { SelectionKey selectionKey = mock(SelectionKey.class); - BytesWriteOperation writeOperation = new BytesWriteOperation(channelContext, buffers, listener); + WriteOperation writeOperation = new FlushReadyWrite(channelContext, buffers, listener); CancelledKeyException cancelledKeyException = new CancelledKeyException(); socketSelector.queueWrite(writeOperation); @@ -149,7 +149,7 @@ public void testQueueWriteSelectionKeyThrowsException() throws Exception { } public void testQueueWriteSuccessful() throws Exception { - BytesWriteOperation writeOperation = new BytesWriteOperation(channelContext, buffers, listener); + WriteOperation writeOperation = new FlushReadyWrite(channelContext, buffers, listener); socketSelector.queueWrite(writeOperation); assertTrue((selectionKey.interestOps() & SelectionKey.OP_WRITE) == 0); @@ -161,7 +161,7 @@ public void testQueueWriteSuccessful() throws Exception { } public void testQueueDirectlyInChannelBufferSuccessful() throws Exception { - BytesWriteOperation writeOperation = new BytesWriteOperation(channelContext, buffers, listener); + WriteOperation writeOperation = new FlushReadyWrite(channelContext, buffers, listener); assertTrue((selectionKey.interestOps() & SelectionKey.OP_WRITE) == 0); @@ -174,7 +174,7 @@ public void testQueueDirectlyInChannelBufferSuccessful() throws Exception { public void testQueueDirectlyInChannelBufferSelectionKeyThrowsException() throws Exception { SelectionKey selectionKey = mock(SelectionKey.class); - BytesWriteOperation writeOperation = new BytesWriteOperation(channelContext, buffers, listener); + WriteOperation writeOperation = new FlushReadyWrite(channelContext, buffers, listener); CancelledKeyException cancelledKeyException = new CancelledKeyException(); when(channelContext.getSelectionKey()).thenReturn(selectionKey); @@ -277,7 +277,7 @@ public void testCleanup() throws Exception { socketSelector.preSelect(); - socketSelector.queueWrite(new BytesWriteOperation(channelContext, buffers, listener)); + socketSelector.queueWrite(new FlushReadyWrite(channelContext, buffers, listener)); socketSelector.scheduleForRegistration(unregisteredChannel); TestSelectionKey testSelectionKey = new TestSelectionKey(0); diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateAction.java index fd797c4340a8f..9969e6b38e54a 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestMultiSearchTemplateAction.java @@ -77,7 +77,7 @@ public static MultiSearchTemplateRequest parseRequest(RestRequest restRequest, b RestMultiSearchAction.parseMultiLineRequest(restRequest, multiRequest.indicesOptions(), allowExplicitIndex, (searchRequest, bytes) -> { - SearchTemplateRequest searchTemplateRequest = RestSearchTemplateAction.parse(bytes); + SearchTemplateRequest searchTemplateRequest = SearchTemplateRequest.fromXContent(bytes); if (searchTemplateRequest.getScript() != null) { searchTemplateRequest.setRequest(searchRequest); multiRequest.add(searchTemplateRequest); diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestRenderSearchTemplateAction.java 
b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestRenderSearchTemplateAction.java index d8c67839cb80f..75acc09424359 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestRenderSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestRenderSearchTemplateAction.java @@ -52,7 +52,7 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client // Creates the render template request SearchTemplateRequest renderRequest; try (XContentParser parser = request.contentOrSourceParamParser()) { - renderRequest = RestSearchTemplateAction.parse(parser); + renderRequest = SearchTemplateRequest.fromXContent(parser); } renderRequest.setSimulate(true); diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java index 7ab9aa6003334..f42afcc19b80f 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/RestSearchTemplateAction.java @@ -47,33 +47,6 @@ public class RestSearchTemplateAction extends BaseRestHandler { private static final Set RESPONSE_PARAMS = Collections.singleton(RestSearchAction.TYPED_KEYS_PARAM); - private static final ObjectParser PARSER; - static { - PARSER = new ObjectParser<>("search_template"); - PARSER.declareField((parser, request, s) -> - request.setScriptParams(parser.map()) - , new ParseField("params"), ObjectParser.ValueType.OBJECT); - PARSER.declareString((request, s) -> { - request.setScriptType(ScriptType.STORED); - request.setScript(s); - }, new ParseField("id")); - PARSER.declareBoolean(SearchTemplateRequest::setExplain, new ParseField("explain")); - PARSER.declareBoolean(SearchTemplateRequest::setProfile, new ParseField("profile")); - PARSER.declareField((parser, request, value) -> { - request.setScriptType(ScriptType.INLINE); - if (parser.currentToken() == XContentParser.Token.START_OBJECT) { - //convert the template to json which is the only supported XContentType (see CustomMustacheFactory#createEncoder) - try (XContentBuilder builder = XContentFactory.jsonBuilder()) { - request.setScript(Strings.toString(builder.copyCurrentStructure(parser))); - } catch (IOException e) { - throw new ParsingException(parser.getTokenLocation(), "Could not parse inline template", e); - } - } else { - request.setScript(parser.text()); - } - }, new ParseField("source", "inline", "template"), ObjectParser.ValueType.OBJECT_OR_STRING); - } - public RestSearchTemplateAction(Settings settings, RestController controller) { super(settings); @@ -99,17 +72,13 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client // Creates the search template request SearchTemplateRequest searchTemplateRequest; try (XContentParser parser = request.contentOrSourceParamParser()) { - searchTemplateRequest = PARSER.parse(parser, new SearchTemplateRequest(), null); + searchTemplateRequest = SearchTemplateRequest.fromXContent(parser); } searchTemplateRequest.setRequest(searchRequest); return channel -> client.execute(SearchTemplateAction.INSTANCE, searchTemplateRequest, new RestStatusToXContentListener<>(channel)); } - public static SearchTemplateRequest parse(XContentParser parser) throws IOException { - return PARSER.parse(parser, new SearchTemplateRequest(), null); - } - @Override 
protected Set responseParams() { return RESPONSE_PARAMS; diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java index b0186b7b0e3cf..da3cc3688149c 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateRequest.java @@ -23,19 +23,28 @@ import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.CompositeIndicesRequest; import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.script.ScriptType; import java.io.IOException; import java.util.Map; +import java.util.Objects; import static org.elasticsearch.action.ValidateActions.addValidationError; /** * A request to execute a search based on a search template. */ -public class SearchTemplateRequest extends ActionRequest implements CompositeIndicesRequest { +public class SearchTemplateRequest extends ActionRequest implements CompositeIndicesRequest, ToXContentObject { private SearchRequest request; private boolean simulate = false; @@ -60,6 +69,24 @@ public SearchRequest getRequest() { return request; } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SearchTemplateRequest request1 = (SearchTemplateRequest) o; + return simulate == request1.simulate && + explain == request1.explain && + profile == request1.profile && + Objects.equals(request, request1.request) && + scriptType == request1.scriptType && + Objects.equals(script, request1.script) && + Objects.equals(scriptParams, request1.scriptParams); + } + + @Override + public int hashCode() { + return Objects.hash(request, simulate, explain, profile, scriptType, script, scriptParams); + } public boolean isSimulate() { return simulate; @@ -134,6 +161,62 @@ public ActionRequestValidationException validate() { return validationException; } + private static ParseField ID_FIELD = new ParseField("id"); + private static ParseField SOURCE_FIELD = new ParseField("source", "inline", "template"); + + private static ParseField PARAMS_FIELD = new ParseField("params"); + private static ParseField EXPLAIN_FIELD = new ParseField("explain"); + private static ParseField PROFILE_FIELD = new ParseField("profile"); + + private static final ObjectParser PARSER; + static { + PARSER = new ObjectParser<>("search_template"); + PARSER.declareField((parser, request, s) -> + request.setScriptParams(parser.map()) + , PARAMS_FIELD, ObjectParser.ValueType.OBJECT); + PARSER.declareString((request, s) -> { + request.setScriptType(ScriptType.STORED); + request.setScript(s); + }, ID_FIELD); + PARSER.declareBoolean(SearchTemplateRequest::setExplain, EXPLAIN_FIELD); + PARSER.declareBoolean(SearchTemplateRequest::setProfile, PROFILE_FIELD); + 
PARSER.declareField((parser, request, value) -> { + request.setScriptType(ScriptType.INLINE); + if (parser.currentToken() == XContentParser.Token.START_OBJECT) { + //convert the template to json which is the only supported XContentType (see CustomMustacheFactory#createEncoder) + try (XContentBuilder builder = XContentFactory.jsonBuilder()) { + request.setScript(Strings.toString(builder.copyCurrentStructure(parser))); + } catch (IOException e) { + throw new ParsingException(parser.getTokenLocation(), "Could not parse inline template", e); + } + } else { + request.setScript(parser.text()); + } + }, SOURCE_FIELD, ObjectParser.ValueType.OBJECT_OR_STRING); + } + + public static SearchTemplateRequest fromXContent(XContentParser parser) throws IOException { + return PARSER.parse(parser, new SearchTemplateRequest(), null); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + + if (scriptType == ScriptType.STORED) { + builder.field(ID_FIELD.getPreferredName(), script); + } else if (scriptType == ScriptType.INLINE) { + builder.field(SOURCE_FIELD.getPreferredName(), script); + } else { + throw new UnsupportedOperationException("Unrecognized script type [" + scriptType + "]."); + } + + return builder.field(PARAMS_FIELD.getPreferredName(), scriptParams) + .field(EXPLAIN_FIELD.getPreferredName(), explain) + .field(PROFILE_FIELD.getPreferredName(), profile) + .endObject(); + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java index 792d993915992..500a5a399ef4a 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/SearchTemplateResponse.java @@ -21,18 +21,23 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.StatusToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.rest.RestStatus; import java.io.IOException; import java.io.InputStream; +import java.util.Map; -public class SearchTemplateResponse extends ActionResponse implements StatusToXContentObject { +public class SearchTemplateResponse extends ActionResponse implements StatusToXContentObject { + public static ParseField TEMPLATE_OUTPUT_FIELD = new ParseField("template_output"); /** Contains the source of the rendered template **/ private BytesReference source; @@ -77,6 +82,30 @@ public void readFrom(StreamInput in) throws IOException { response = in.readOptionalStreamable(SearchResponse::new); } + public static SearchTemplateResponse fromXContent(XContentParser parser) throws IOException { + SearchTemplateResponse searchTemplateResponse = new SearchTemplateResponse(); + Map contentAsMap = parser.map(); + + if 
(contentAsMap.containsKey(TEMPLATE_OUTPUT_FIELD.getPreferredName())) { + Object source = contentAsMap.get(TEMPLATE_OUTPUT_FIELD.getPreferredName()); + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON) + .value(source); + searchTemplateResponse.setSource(BytesReference.bytes(builder)); + } else { + XContentType contentType = parser.contentType(); + XContentBuilder builder = XContentFactory.contentBuilder(contentType) + .map(contentAsMap); + XContentParser searchResponseParser = contentType.xContent().createParser( + parser.getXContentRegistry(), + parser.getDeprecationHandler(), + BytesReference.bytes(builder).streamInput()); + + SearchResponse searchResponse = SearchResponse.fromXContent(searchResponseParser); + searchTemplateResponse.setResponse(searchResponse); + } + return searchTemplateResponse; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { if (hasResponse()) { @@ -85,7 +114,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject(); //we can assume the template is always json as we convert it before compiling it try (InputStream stream = source.streamInput()) { - builder.rawField("template_output", stream, XContentType.JSON); + builder.rawField(TEMPLATE_OUTPUT_FIELD.getPreferredName(), stream, XContentType.JSON); } builder.endObject(); } diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateIT.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateIT.java index 1529b655a5042..fe2fedf62b559 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateIT.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateIT.java @@ -101,7 +101,7 @@ public void testTemplateQueryAsEscapedString() throws Exception { + " \"size\": 1" + " }" + "}"; - SearchTemplateRequest request = RestSearchTemplateAction.parse(createParser(JsonXContent.jsonXContent, query)); + SearchTemplateRequest request = SearchTemplateRequest.fromXContent(createParser(JsonXContent.jsonXContent, query)); request.setRequest(searchRequest); SearchTemplateResponse searchResponse = client().execute(SearchTemplateAction.INSTANCE, request).get(); assertThat(searchResponse.getResponse().getHits().getHits().length, equalTo(1)); @@ -122,7 +122,7 @@ public void testTemplateQueryAsEscapedStringStartingWithConditionalClause() thro + " \"use_size\": true" + " }" + "}"; - SearchTemplateRequest request = RestSearchTemplateAction.parse(createParser(JsonXContent.jsonXContent, templateString)); + SearchTemplateRequest request = SearchTemplateRequest.fromXContent(createParser(JsonXContent.jsonXContent, templateString)); request.setRequest(searchRequest); SearchTemplateResponse searchResponse = client().execute(SearchTemplateAction.INSTANCE, request).get(); assertThat(searchResponse.getResponse().getHits().getHits().length, equalTo(1)); @@ -143,7 +143,7 @@ public void testTemplateQueryAsEscapedStringWithConditionalClauseAtEnd() throws + " \"use_size\": true" + " }" + "}"; - SearchTemplateRequest request = RestSearchTemplateAction.parse(createParser(JsonXContent.jsonXContent, templateString)); + SearchTemplateRequest request = SearchTemplateRequest.fromXContent(createParser(JsonXContent.jsonXContent, templateString)); request.setRequest(searchRequest); SearchTemplateResponse searchResponse = client().execute(SearchTemplateAction.INSTANCE, request).get(); 
assertThat(searchResponse.getResponse().getHits().getHits().length, equalTo(1)); diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateRequestTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateRequestTests.java index 9cdca70f0e1a6..7d4a6479727e2 100644 --- a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateRequestTests.java +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateRequestTests.java @@ -19,117 +19,77 @@ package org.elasticsearch.script.mustache; -import org.elasticsearch.common.xcontent.XContentParseException; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.script.ScriptType; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.search.RandomSearchRequestGenerator; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.AbstractStreamableTestCase; import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; +import java.util.function.Consumer; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.hasEntry; -import static org.hamcrest.Matchers.hasItems; -import static org.hamcrest.Matchers.hasKey; -import static org.hamcrest.Matchers.nullValue; - -public class SearchTemplateRequestTests extends ESTestCase { - - public void testParseInlineTemplate() throws Exception { - String source = "{" + - " 'source' : {\n" + - " 'query': {\n" + - " 'terms': {\n" + - " 'status': [\n" + - " '{{#status}}',\n" + - " '{{.}}',\n" + - " '{{/status}}'\n" + - " ]\n" + - " }\n" + - " }\n" + - " }" + - "}"; - - SearchTemplateRequest request = RestSearchTemplateAction.parse(newParser(source)); - assertThat(request.getScript(), equalTo("{\"query\":{\"terms\":{\"status\":[\"{{#status}}\",\"{{.}}\",\"{{/status}}\"]}}}")); - assertThat(request.getScriptType(), equalTo(ScriptType.INLINE)); - assertThat(request.getScriptParams(), nullValue()); - } +public class SearchTemplateRequestTests extends AbstractStreamableTestCase { - public void testParseInlineTemplateWithParams() throws Exception { - String source = "{" + - " 'source' : {" + - " 'query': { 'match' : { '{{my_field}}' : '{{my_value}}' } }," + - " 'size' : '{{my_size}}'" + - " }," + - " 'params' : {" + - " 'my_field' : 'foo'," + - " 'my_value' : 'bar'," + - " 'my_size' : 5" + - " }" + - "}"; - - SearchTemplateRequest request = RestSearchTemplateAction.parse(newParser(source)); - assertThat(request.getScript(), equalTo("{\"query\":{\"match\":{\"{{my_field}}\":\"{{my_value}}\"}},\"size\":\"{{my_size}}\"}")); - assertThat(request.getScriptType(), equalTo(ScriptType.INLINE)); - assertThat(request.getScriptParams().size(), equalTo(3)); - assertThat(request.getScriptParams(), hasEntry("my_field", "foo")); - assertThat(request.getScriptParams(), hasEntry("my_value", "bar")); - assertThat(request.getScriptParams(), hasEntry("my_size", 5)); + @Override + protected SearchTemplateRequest createBlankInstance() { + return new SearchTemplateRequest(); } - public void testParseInlineTemplateAsString() throws Exception { - String source = "{'source' : '{\\\"query\\\":{\\\"bool\\\":{\\\"must\\\":{\\\"match\\\":{\\\"foo\\\":\\\"{{text}}\\\"}}}}}'}"; - - SearchTemplateRequest request = RestSearchTemplateAction.parse(newParser(source)); - assertThat(request.getScript(), 
equalTo("{\"query\":{\"bool\":{\"must\":{\"match\":{\"foo\":\"{{text}}\"}}}}}")); - assertThat(request.getScriptType(), equalTo(ScriptType.INLINE)); - assertThat(request.getScriptParams(), nullValue()); + @Override + protected SearchTemplateRequest createTestInstance() { + return createRandomRequest(); } - @SuppressWarnings("unchecked") - public void testParseInlineTemplateAsStringWithParams() throws Exception { - String source = "{'source' : '{\\\"query\\\":{\\\"match\\\":{\\\"{{field}}\\\":\\\"{{value}}\\\"}}}', " + - "'params': {'status': ['pending', 'published']}}"; - - SearchTemplateRequest request = RestSearchTemplateAction.parse(newParser(source)); - assertThat(request.getScript(), equalTo("{\"query\":{\"match\":{\"{{field}}\":\"{{value}}\"}}}")); - assertThat(request.getScriptType(), equalTo(ScriptType.INLINE)); - assertThat(request.getScriptParams().size(), equalTo(1)); - assertThat(request.getScriptParams(), hasKey("status")); - assertThat((List) request.getScriptParams().get("status"), hasItems("pending", "published")); + @Override + protected SearchTemplateRequest mutateInstance(SearchTemplateRequest instance) throws IOException { + List> mutators = new ArrayList<>(); + + mutators.add(request -> request.setScriptType( + randomValueOtherThan(request.getScriptType(), () -> randomFrom(ScriptType.values())))); + mutators.add(request -> request.setScript( + randomValueOtherThan(request.getScript(), () -> randomAlphaOfLength(50)))); + + mutators.add(request -> { + Map mutatedScriptParams = new HashMap<>(request.getScriptParams()); + String newField = randomValueOtherThanMany(mutatedScriptParams::containsKey, () -> randomAlphaOfLength(5)); + mutatedScriptParams.put(newField, randomAlphaOfLength(10)); + request.setScriptParams(mutatedScriptParams); + }); + + mutators.add(request -> request.setProfile(!request.isProfile())); + mutators.add(request -> request.setExplain(!request.isExplain())); + mutators.add(request -> request.setSimulate(!request.isSimulate())); + + mutators.add(request -> request.setRequest( + RandomSearchRequestGenerator.randomSearchRequest(SearchSourceBuilder::searchSource))); + + SearchTemplateRequest mutatedInstance = copyInstance(instance); + Consumer mutator = randomFrom(mutators); + mutator.accept(mutatedInstance); + return mutatedInstance; } - public void testParseStoredTemplate() throws Exception { - String source = "{'id' : 'storedTemplate'}"; - - SearchTemplateRequest request = RestSearchTemplateAction.parse(newParser(source)); - assertThat(request.getScript(), equalTo("storedTemplate")); - assertThat(request.getScriptType(), equalTo(ScriptType.STORED)); - assertThat(request.getScriptParams(), nullValue()); - } - public void testParseStoredTemplateWithParams() throws Exception { - String source = "{'id' : 'another_template', 'params' : {'bar': 'foo'}}"; + public static SearchTemplateRequest createRandomRequest() { + SearchTemplateRequest request = new SearchTemplateRequest(); + request.setScriptType(randomFrom(ScriptType.values())); + request.setScript(randomAlphaOfLength(50)); - SearchTemplateRequest request = RestSearchTemplateAction.parse(newParser(source)); - assertThat(request.getScript(), equalTo("another_template")); - assertThat(request.getScriptType(), equalTo(ScriptType.STORED)); - assertThat(request.getScriptParams().size(), equalTo(1)); - assertThat(request.getScriptParams(), hasEntry("bar", "foo")); - } + Map scriptParams = new HashMap<>(); + for (int i = 0; i < randomInt(10); i++) { + scriptParams.put(randomAlphaOfLength(5), 
randomAlphaOfLength(10)); + } + request.setScriptParams(scriptParams); - public void testParseWrongTemplate() { - // Unclosed template id - expectThrows(XContentParseException.class, () -> RestSearchTemplateAction.parse(newParser("{'id' : 'another_temp }"))); - } + request.setExplain(randomBoolean()); + request.setProfile(randomBoolean()); + request.setSimulate(randomBoolean()); - /** - * Creates a {@link XContentParser} with the given String while replacing single quote to double quotes. - */ - private XContentParser newParser(String s) throws IOException { - assertNotNull(s); - return createParser(JsonXContent.jsonXContent, s.replace("'", "\"")); + request.setRequest(RandomSearchRequestGenerator.randomSearchRequest( + SearchSourceBuilder::searchSource)); + return request; } } diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateRequestXContentTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateRequestXContentTests.java new file mode 100644 index 0000000000000..0e9e8ca628975 --- /dev/null +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateRequestXContentTests.java @@ -0,0 +1,197 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.script.mustache; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParseException; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.script.ScriptType; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.nullValue; + +public class SearchTemplateRequestXContentTests extends AbstractXContentTestCase { + + @Override + public SearchTemplateRequest createTestInstance() { + return SearchTemplateRequestTests.createRandomRequest(); + } + + @Override + protected SearchTemplateRequest doParseInstance(XContentParser parser) throws IOException { + return SearchTemplateRequest.fromXContent(parser); + } + + /** + * Note that when checking equality for xContent parsing, we omit two parts of the request: + * - The 'simulate' option, since this parameter is not included in the + * request's xContent (it's instead used to determine the request endpoint). + * - The random SearchRequest, since this component only affects the request + * parameters and also isn't captured in the request's xContent. + */ + @Override + protected void assertEqualInstances(SearchTemplateRequest expectedInstance, SearchTemplateRequest newInstance) { + assertTrue( + expectedInstance.isExplain() == newInstance.isExplain() && + expectedInstance.isProfile() == newInstance.isProfile() && + expectedInstance.getScriptType() == newInstance.getScriptType() && + Objects.equals(expectedInstance.getScript(), newInstance.getScript()) && + Objects.equals(expectedInstance.getScriptParams(), newInstance.getScriptParams())); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + public void testToXContentWithInlineTemplate() throws IOException { + SearchTemplateRequest request = new SearchTemplateRequest(); + + request.setScriptType(ScriptType.INLINE); + request.setScript("{\"query\": { \"match\" : { \"{{my_field}}\" : \"{{my_value}}\" } } }"); + request.setProfile(true); + + Map scriptParams = new HashMap<>(); + scriptParams.put("my_field", "foo"); + scriptParams.put("my_value", "bar"); + request.setScriptParams(scriptParams); + + XContentType contentType = randomFrom(XContentType.values()); + XContentBuilder expectedRequest = XContentFactory.contentBuilder(contentType) + .startObject() + .field("source", "{\"query\": { \"match\" : { \"{{my_field}}\" : \"{{my_value}}\" } } }") + .startObject("params") + .field("my_field", "foo") + .field("my_value", "bar") + .endObject() + .field("explain", false) + .field("profile", true) + .endObject(); + + XContentBuilder actualRequest = XContentFactory.contentBuilder(contentType); + request.toXContent(actualRequest, ToXContent.EMPTY_PARAMS); + + assertToXContentEquivalent(BytesReference.bytes(expectedRequest), + BytesReference.bytes(actualRequest), + contentType); + } + + public void testToXContentWithStoredTemplate() throws 
IOException { + SearchTemplateRequest request = new SearchTemplateRequest(); + + request.setScriptType(ScriptType.STORED); + request.setScript("match_template"); + request.setExplain(true); + + Map params = new HashMap<>(); + params.put("my_field", "foo"); + params.put("my_value", "bar"); + request.setScriptParams(params); + + XContentType contentType = randomFrom(XContentType.values()); + XContentBuilder expectedRequest = XContentFactory.contentBuilder(contentType) + .startObject() + .field("id", "match_template") + .startObject("params") + .field("my_field", "foo") + .field("my_value", "bar") + .endObject() + .field("explain", true) + .field("profile", false) + .endObject(); + + XContentBuilder actualRequest = XContentFactory.contentBuilder(contentType); + request.toXContent(actualRequest, ToXContent.EMPTY_PARAMS); + + assertToXContentEquivalent( + BytesReference.bytes(expectedRequest), + BytesReference.bytes(actualRequest), + contentType); + } + + public void testFromXContentWithEmbeddedTemplate() throws Exception { + String source = "{" + + " 'source' : {\n" + + " 'query': {\n" + + " 'terms': {\n" + + " 'status': [\n" + + " '{{#status}}',\n" + + " '{{.}}',\n" + + " '{{/status}}'\n" + + " ]\n" + + " }\n" + + " }\n" + + " }" + + "}"; + + SearchTemplateRequest request = SearchTemplateRequest.fromXContent(newParser(source)); + assertThat(request.getScript(), equalTo("{\"query\":{\"terms\":{\"status\":[\"{{#status}}\",\"{{.}}\",\"{{/status}}\"]}}}")); + assertThat(request.getScriptType(), equalTo(ScriptType.INLINE)); + assertThat(request.getScriptParams(), nullValue()); + } + + public void testFromXContentWithEmbeddedTemplateAndParams() throws Exception { + String source = "{" + + " 'source' : {" + + " 'query': { 'match' : { '{{my_field}}' : '{{my_value}}' } }," + + " 'size' : '{{my_size}}'" + + " }," + + " 'params' : {" + + " 'my_field' : 'foo'," + + " 'my_value' : 'bar'," + + " 'my_size' : 5" + + " }" + + "}"; + + SearchTemplateRequest request = SearchTemplateRequest.fromXContent(newParser(source)); + assertThat(request.getScript(), equalTo("{\"query\":{\"match\":{\"{{my_field}}\":\"{{my_value}}\"}},\"size\":\"{{my_size}}\"}")); + assertThat(request.getScriptType(), equalTo(ScriptType.INLINE)); + assertThat(request.getScriptParams().size(), equalTo(3)); + assertThat(request.getScriptParams(), hasEntry("my_field", "foo")); + assertThat(request.getScriptParams(), hasEntry("my_value", "bar")); + assertThat(request.getScriptParams(), hasEntry("my_size", 5)); + } + + public void testFromXContentWithMalformedRequest() { + // Unclosed template id + expectThrows(XContentParseException.class, () -> SearchTemplateRequest.fromXContent(newParser("{'id' : 'another_temp }"))); + } + + /** + * Creates a {@link XContentParser} with the given String while replacing single quote to double quotes. + */ + private XContentParser newParser(String s) throws IOException { + assertNotNull(s); + return createParser(JsonXContent.jsonXContent, s.replace("'", "\"")); + } +} diff --git a/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateResponseTests.java b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateResponseTests.java new file mode 100644 index 0000000000000..53f5d1d8f842e --- /dev/null +++ b/modules/lang-mustache/src/test/java/org/elasticsearch/script/mustache/SearchTemplateResponseTests.java @@ -0,0 +1,211 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.script.mustache; + +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.text.Text; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.search.SearchHit; +import org.elasticsearch.search.SearchHits; +import org.elasticsearch.search.internal.InternalSearchResponse; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; +import java.util.Collections; +import java.util.function.Predicate; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; + +public class SearchTemplateResponseTests extends AbstractXContentTestCase { + + @Override + protected SearchTemplateResponse createTestInstance() { + SearchTemplateResponse response = new SearchTemplateResponse(); + if (randomBoolean()) { + response.setResponse(createSearchResponse()); + } else { + response.setSource(createSource()); + } + return response; + } + + @Override + protected SearchTemplateResponse doParseInstance(XContentParser parser) throws IOException { + return SearchTemplateResponse.fromXContent(parser); + } + + /** + * For simplicity we create a minimal response, as there is already a dedicated + * test class for search response parsing and serialization. 
+ */ + private static SearchResponse createSearchResponse() { + long tookInMillis = randomNonNegativeLong(); + int totalShards = randomIntBetween(1, Integer.MAX_VALUE); + int successfulShards = randomIntBetween(0, totalShards); + int skippedShards = randomIntBetween(0, totalShards); + InternalSearchResponse internalSearchResponse = InternalSearchResponse.empty(); + + return new SearchResponse(internalSearchResponse, null, totalShards, successfulShards, + skippedShards, tookInMillis, ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY); + } + + private static BytesReference createSource() { + try { + XContentBuilder source = XContentFactory.jsonBuilder() + .startObject() + .startObject("query") + .startObject("match") + .field(randomAlphaOfLength(5), randomAlphaOfLength(10)) + .endObject() + .endObject() + .endObject(); + return BytesReference.bytes(source); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + @Override + protected Predicate getRandomFieldsExcludeFilter() { + String templateOutputField = SearchTemplateResponse.TEMPLATE_OUTPUT_FIELD.getPreferredName(); + return field -> field.equals(templateOutputField) || field.startsWith(templateOutputField + "."); + } + + /** + * Note that we can't rely on normal equals and hashCode checks, since {@link SearchResponse} doesn't + * currently implement equals and hashCode. Instead, we compare the template outputs for equality, + * and perform some sanity checks on the search response instances. + */ + @Override + protected void assertEqualInstances(SearchTemplateResponse expectedInstance, SearchTemplateResponse newInstance) { + assertNotSame(newInstance, expectedInstance); + + BytesReference expectedSource = expectedInstance.getSource(); + BytesReference newSource = newInstance.getSource(); + assertEquals(expectedSource == null, newSource == null); + if (expectedSource != null) { + try { + assertToXContentEquivalent(expectedSource, newSource, XContentType.JSON); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + assertEquals(expectedInstance.hasResponse(), newInstance.hasResponse()); + if (expectedInstance.hasResponse()) { + SearchResponse expectedResponse = expectedInstance.getResponse(); + SearchResponse newResponse = newInstance.getResponse(); + + assertEquals(expectedResponse.getHits().totalHits, newResponse.getHits().totalHits); + assertEquals(expectedResponse.getHits().getMaxScore(), newResponse.getHits().getMaxScore(), 0.0001); + } + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + public void testSourceToXContent() throws IOException { + SearchTemplateResponse response = new SearchTemplateResponse(); + + XContentBuilder source = XContentFactory.jsonBuilder() + .startObject() + .startObject("query") + .startObject("terms") + .field("status", new String[]{"pending", "published"}) + .endObject() + .endObject() + .endObject(); + response.setSource(BytesReference.bytes(source)); + + XContentType contentType = randomFrom(XContentType.values()); + XContentBuilder expectedResponse = XContentFactory.contentBuilder(contentType) + .startObject() + .startObject("template_output") + .startObject("query") + .startObject("terms") + .field("status", new String[]{"pending", "published"}) + .endObject() + .endObject() + .endObject() + .endObject(); + + XContentBuilder actualResponse = XContentFactory.contentBuilder(contentType); + response.toXContent(actualResponse, ToXContent.EMPTY_PARAMS); + + assertToXContentEquivalent( + BytesReference.bytes(expectedResponse), 
+ BytesReference.bytes(actualResponse), + contentType); + } + + public void testSearchResponseToXContent() throws IOException { + SearchHit hit = new SearchHit(1, "id", new Text("type"), Collections.emptyMap()); + hit.score(2.0f); + SearchHit[] hits = new SearchHit[] { hit }; + + InternalSearchResponse internalSearchResponse = new InternalSearchResponse( + new SearchHits(hits, 100, 1.5f), null, null, null, false, null, 1); + SearchResponse searchResponse = new SearchResponse(internalSearchResponse, null, + 0, 0, 0, 0, ShardSearchFailure.EMPTY_ARRAY, SearchResponse.Clusters.EMPTY); + + SearchTemplateResponse response = new SearchTemplateResponse(); + response.setResponse(searchResponse); + + XContentType contentType = randomFrom(XContentType.values()); + XContentBuilder expectedResponse = XContentFactory.contentBuilder(contentType) + .startObject() + .field("took", 0) + .field("timed_out", false) + .startObject("_shards") + .field("total", 0) + .field("successful", 0) + .field("skipped", 0) + .field("failed", 0) + .endObject() + .startObject("hits") + .field("total", 100) + .field("max_score", 1.5F) + .startArray("hits") + .startObject() + .field("_type", "type") + .field("_id", "id") + .field("_score", 2.0F) + .endObject() + .endArray() + .endObject() + .endObject(); + + XContentBuilder actualResponse = XContentFactory.contentBuilder(contentType); + response.toXContent(actualResponse, ToXContent.EMPTY_PARAMS); + + assertToXContentEquivalent( + BytesReference.bytes(expectedResponse), + BytesReference.bytes(actualResponse), + contentType); + } +} diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java index 0364ad667efc7..4ebcf8bfb82d2 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/PainlessPlugin.java @@ -32,6 +32,7 @@ import org.elasticsearch.painless.spi.PainlessExtension; import org.elasticsearch.painless.spi.Whitelist; import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.painless.spi.WhitelistLoader; import org.elasticsearch.plugins.ExtensiblePlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.ScriptPlugin; @@ -39,6 +40,7 @@ import org.elasticsearch.rest.RestHandler; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptEngine; +import org.elasticsearch.search.aggregations.pipeline.movfn.MovingFunctionScript; import java.util.ArrayList; import java.util.Arrays; @@ -55,18 +57,34 @@ */ public final class PainlessPlugin extends Plugin implements ScriptPlugin, ExtensiblePlugin, ActionPlugin { - private final Map, List> extendedWhitelists = new HashMap<>(); + private static final Map, List> whitelists; + + /* + * Contexts from Core that need custom whitelists can add them to the map below. 
+ * Whitelist resources should be added as appropriately named, separate files + * under Painless' resources + */ + static { + Map, List> map = new HashMap<>(); + + // Moving Function Pipeline Agg + List movFn = new ArrayList<>(Whitelist.BASE_WHITELISTS); + movFn.add(WhitelistLoader.loadFromResourceFiles(Whitelist.class, "org.elasticsearch.aggs.movfn.txt")); + map.put(MovingFunctionScript.CONTEXT, movFn); + + whitelists = map; + } @Override public ScriptEngine getScriptEngine(Settings settings, Collection> contexts) { Map, List> contextsWithWhitelists = new HashMap<>(); for (ScriptContext context : contexts) { // we might have a context that only uses the base whitelists, so would not have been filled in by reloadSPI - List whitelists = extendedWhitelists.get(context); - if (whitelists == null) { - whitelists = new ArrayList<>(Whitelist.BASE_WHITELISTS); + List contextWhitelists = whitelists.get(context); + if (contextWhitelists == null) { + contextWhitelists = new ArrayList<>(Whitelist.BASE_WHITELISTS); } - contextsWithWhitelists.put(context, whitelists); + contextsWithWhitelists.put(context, contextWhitelists); } return new PainlessScriptEngine(settings, contextsWithWhitelists); } @@ -80,7 +98,7 @@ public List> getSettings() { public void reloadSPI(ClassLoader loader) { for (PainlessExtension extension : ServiceLoader.load(PainlessExtension.class, loader)) { for (Map.Entry, List> entry : extension.getContextWhitelists().entrySet()) { - List existing = extendedWhitelists.computeIfAbsent(entry.getKey(), + List existing = whitelists.computeIfAbsent(entry.getKey(), c -> new ArrayList<>(Whitelist.BASE_WHITELISTS)); existing.addAll(entry.getValue()); } diff --git a/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.aggs.movfn.txt b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.aggs.movfn.txt new file mode 100644 index 0000000000000..a120b73820ada --- /dev/null +++ b/modules/lang-painless/src/main/resources/org/elasticsearch/painless/spi/org.elasticsearch.aggs.movfn.txt @@ -0,0 +1,32 @@ +# +# Licensed to Elasticsearch under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# + +# This file contains a whitelist for the Moving Function pipeline aggregator in core + +class org.elasticsearch.search.aggregations.pipeline.movfn.MovingFunctions { + double max(double[]) + double min(double[]) + double sum(double[]) + double stdDev(double[], double) + double unweightedAvg(double[]) + double linearWeightedAvg(double[]) + double ewma(double[], double) + double holt(double[], double, double) + double holtWinters(double[], double, double, double, int, boolean) +} \ No newline at end of file diff --git a/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/70_mov_fn_agg.yml b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/70_mov_fn_agg.yml new file mode 100644 index 0000000000000..039b54aab01d1 --- /dev/null +++ b/modules/lang-painless/src/test/resources/rest-api-spec/test/painless/70_mov_fn_agg.yml @@ -0,0 +1,315 @@ +# Sanity integration test to make sure the custom context and whitelist work for moving_fn pipeline agg +# +setup: + - skip: + version: " - 6.4.0" + reason: "moving_fn added in 6.4.0" + - do: + indices.create: + index: test + body: + mappings: + _doc: + properties: + value_field: + type: integer + date: + type: date + + - do: + bulk: + refresh: true + body: + - index: + _index: test + _type: _doc + _id: 1 + - date: "2017-01-01T00:00:00" + value_field: 1 + - index: + _index: test + _type: _doc + _id: 2 + - date: "2017-01-02T00:00:00" + value_field: 2 + - index: + _index: test + _type: _doc + _id: 3 + - date: "2017-01-03T00:00:00" + value_field: 3 + - index: + _index: test + _type: _doc + _id: 4 + - date: "2017-01-04T00:00:00" + value_field: 4 + - index: + _index: test + _type: _doc + _id: 5 + - date: "2017-01-05T00:00:00" + value_field: 5 + - index: + _index: test + _type: _doc + _id: 6 + - date: "2017-01-06T00:00:00" + value_field: 6 + + - do: + indices.refresh: + index: [test] + +--- +"max": + + - do: + search: + body: + size: 0 + aggs: + the_histo: + date_histogram: + field: "date" + interval: "1d" + aggs: + the_avg: + avg: + field: "value_field" + the_mov_fn: + moving_fn: + buckets_path: "the_avg" + window: 3 + script: "MovingFunctions.max(values)" + + - match: { hits.total: 6 } + - length: { hits.hits: 0 } + - is_false: aggregations.the_histo.buckets.0.the_mov_fn.value + - match: { aggregations.the_histo.buckets.1.the_mov_fn.value: 1.0 } + - match: { aggregations.the_histo.buckets.2.the_mov_fn.value: 2.0 } + - match: { aggregations.the_histo.buckets.3.the_mov_fn.value: 3.0 } + - match: { aggregations.the_histo.buckets.4.the_mov_fn.value: 4.0 } + - match: { aggregations.the_histo.buckets.5.the_mov_fn.value: 5.0 } + +--- +"min": + + - do: + search: + body: + size: 0 + aggs: + the_histo: + date_histogram: + field: "date" + interval: "1d" + aggs: + the_avg: + avg: + field: "value_field" + the_mov_fn: + moving_fn: + buckets_path: "the_avg" + window: 3 + script: "MovingFunctions.min(values)" + + - match: { hits.total: 6 } + - length: { hits.hits: 0 } + - is_false: aggregations.the_histo.buckets.0.the_mov_fn.value + - match: { aggregations.the_histo.buckets.1.the_mov_fn.value: 1.0 } + - match: { aggregations.the_histo.buckets.2.the_mov_fn.value: 1.0 } + - match: { aggregations.the_histo.buckets.3.the_mov_fn.value: 1.0 } + - match: { aggregations.the_histo.buckets.4.the_mov_fn.value: 2.0 } + - match: { aggregations.the_histo.buckets.5.the_mov_fn.value: 3.0 } + +--- +"sum": + + - do: + search: + body: + size: 0 + aggs: + the_histo: + date_histogram: + field: "date" + interval: "1d" + aggs: + the_avg: + avg: + field: 
"value_field" + the_mov_fn: + moving_fn: + buckets_path: "the_avg" + window: 3 + script: "MovingFunctions.sum(values)" + + - match: { hits.total: 6 } + - length: { hits.hits: 0 } + - match: { aggregations.the_histo.buckets.0.the_mov_fn.value: 0.0 } + - match: { aggregations.the_histo.buckets.1.the_mov_fn.value: 1.0 } + - match: { aggregations.the_histo.buckets.2.the_mov_fn.value: 3.0 } + - match: { aggregations.the_histo.buckets.3.the_mov_fn.value: 6.0 } + - match: { aggregations.the_histo.buckets.4.the_mov_fn.value: 9.0 } + - match: { aggregations.the_histo.buckets.5.the_mov_fn.value: 12.0 } + +--- +"unweightedAvg": + + - do: + search: + body: + size: 0 + aggs: + the_histo: + date_histogram: + field: "date" + interval: "1d" + aggs: + the_avg: + avg: + field: "value_field" + the_mov_fn: + moving_fn: + buckets_path: "the_avg" + window: 3 + script: "MovingFunctions.unweightedAvg(values)" + + - match: { hits.total: 6 } + - length: { hits.hits: 0 } + + +--- +"linearWeightedAvg": + + - do: + search: + body: + size: 0 + aggs: + the_histo: + date_histogram: + field: "date" + interval: "1d" + aggs: + the_avg: + avg: + field: "value_field" + the_mov_fn: + moving_fn: + buckets_path: "the_avg" + window: 3 + script: "MovingFunctions.linearWeightedAvg(values)" + + - match: { hits.total: 6 } + - length: { hits.hits: 0 } + + +--- +"ewma": + + - do: + search: + body: + size: 0 + aggs: + the_histo: + date_histogram: + field: "date" + interval: "1d" + aggs: + the_avg: + avg: + field: "value_field" + the_mov_fn: + moving_fn: + buckets_path: "the_avg" + window: 3 + script: "MovingFunctions.ewma(values, 0.1)" + + - match: { hits.total: 6 } + - length: { hits.hits: 0 } + + +--- +"holt": + + - do: + search: + body: + size: 0 + aggs: + the_histo: + date_histogram: + field: "date" + interval: "1d" + aggs: + the_avg: + avg: + field: "value_field" + the_mov_fn: + moving_fn: + buckets_path: "the_avg" + window: 3 + script: "MovingFunctions.holt(values, 0.1, 0.1)" + + - match: { hits.total: 6 } + - length: { hits.hits: 0 } + + +--- +"holtWinters": + + - do: + search: + body: + size: 0 + aggs: + the_histo: + date_histogram: + field: "date" + interval: "1d" + aggs: + the_avg: + avg: + field: "value_field" + the_mov_fn: + moving_fn: + buckets_path: "the_avg" + window: 1 + script: "if (values.length > 1) { MovingFunctions.holtWinters(values, 0.1, 0.1, 0.1, 1, true)}" + + - match: { hits.total: 6 } + - length: { hits.hits: 0 } + +--- +"stdDev": + + - do: + search: + body: + size: 0 + aggs: + the_histo: + date_histogram: + field: "date" + interval: "1d" + aggs: + the_avg: + avg: + field: "value_field" + the_mov_fn: + moving_fn: + buckets_path: "the_avg" + window: 3 + script: "MovingFunctions.stdDev(values, MovingFunctions.unweightedAvg(values))" + + - match: { hits.total: 6 } + - length: { hits.hits: 0 } + + + + + + diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java index 5babcef2e8d65..103679f5328ef 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolatorFieldMapper.java @@ -161,7 +161,7 @@ static BinaryFieldMapper createQueryBuilderFieldBuilder(BuilderContext context) } static RangeFieldMapper createExtractedRangeFieldBuilder(String name, RangeType rangeType, BuilderContext context) { - RangeFieldMapper.Builder builder = new RangeFieldMapper.Builder(name, 
rangeType, context.indexCreatedVersion()); + RangeFieldMapper.Builder builder = new RangeFieldMapper.Builder(name, rangeType); // For now no doc values, because in processQuery(...) only the Lucene range fields get added: builder.docValues(false); return builder.build(context); diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java index f218d6ae8dfaa..f1ac681b59fdf 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java @@ -115,7 +115,7 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client @Override protected ReindexRequest buildRequest(RestRequest request) throws IOException { if (request.hasParam("pipeline")) { - throw new IllegalArgumentException("_reindex doesn't support [pipeline] as a query parmaeter. " + throw new IllegalArgumentException("_reindex doesn't support [pipeline] as a query parameter. " + "Specify it in the [dest] object instead."); } ReindexRequest internal = new ReindexRequest(new SearchRequest(), new IndexRequest()); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java index 1c33ccdaaa289..88fa31f423a21 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java @@ -149,7 +149,7 @@ public void testPipelineQueryParameterIsError() throws IOException { request.withParams(singletonMap("pipeline", "doesn't matter")); Exception e = expectThrows(IllegalArgumentException.class, () -> action.buildRequest(request.build())); - assertEquals("_reindex doesn't support [pipeline] as a query parmaeter. Specify it in the [dest] object instead.", e.getMessage()); + assertEquals("_reindex doesn't support [pipeline] as a query parameter. Specify it in the [dest] object instead.", e.getMessage()); } public void testSetScrollTimeout() throws IOException { diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java index 12db47908d1f3..6e39a7f50d2cd 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpChannel.java @@ -40,6 +40,7 @@ import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.http.HttpHandlingSettings; import org.elasticsearch.http.netty4.cors.Netty4CorsHandler; import org.elasticsearch.http.netty4.pipelining.HttpPipelinedRequest; import org.elasticsearch.rest.AbstractRestChannel; @@ -60,27 +61,29 @@ final class Netty4HttpChannel extends AbstractRestChannel { private final FullHttpRequest nettyRequest; private final HttpPipelinedRequest pipelinedRequest; private final ThreadContext threadContext; + private final HttpHandlingSettings handlingSettings; /** * @param transport The corresponding NettyHttpServerTransport where this channel belongs to. 
* @param request The request that is handled by this channel. * @param pipelinedRequest If HTTP pipelining is enabled provide the corresponding pipelined request. May be null if - * HTTP pipelining is disabled. - * @param detailedErrorsEnabled true iff error messages should include stack traces. + * HTTP pipelining is disabled. + * @param handlingSettings the HTTP handling settings, including whether detailed error messages are enabled * @param threadContext the thread context for the channel */ Netty4HttpChannel( final Netty4HttpServerTransport transport, final Netty4HttpRequest request, final HttpPipelinedRequest pipelinedRequest, - final boolean detailedErrorsEnabled, + final HttpHandlingSettings handlingSettings, final ThreadContext threadContext) { - super(request, detailedErrorsEnabled); + super(request, handlingSettings.getDetailedErrorsEnabled()); this.transport = transport; this.channel = request.getChannel(); this.nettyRequest = request.request(); this.pipelinedRequest = pipelinedRequest; this.threadContext = threadContext; + this.handlingSettings = handlingSettings; } @Override @@ -170,7 +173,7 @@ private void setHeaderField(HttpResponse resp, String headerField, String value, } private void addCookies(HttpResponse resp) { - if (transport.resetCookies) { + if (handlingSettings.isResetCookies()) { String cookieString = nettyRequest.headers().get(HttpHeaderNames.COOKIE); if (cookieString != null) { Set<Cookie> cookies = ServerCookieDecoder.STRICT.decode(cookieString); @@ -222,8 +225,6 @@ private FullHttpResponse newResponse(ByteBuf buffer) { return response; } - private static final HttpResponseStatus TOO_MANY_REQUESTS = new HttpResponseStatus(429, "Too Many Requests"); - private static Map<RestStatus, HttpResponseStatus> MAP; static { @@ -266,7 +267,7 @@ private FullHttpResponse newResponse(ByteBuf buffer) { map.put(RestStatus.UNPROCESSABLE_ENTITY, HttpResponseStatus.BAD_REQUEST); map.put(RestStatus.LOCKED, HttpResponseStatus.BAD_REQUEST); map.put(RestStatus.FAILED_DEPENDENCY, HttpResponseStatus.BAD_REQUEST); - map.put(RestStatus.TOO_MANY_REQUESTS, TOO_MANY_REQUESTS); + map.put(RestStatus.TOO_MANY_REQUESTS, HttpResponseStatus.TOO_MANY_REQUESTS); map.put(RestStatus.INTERNAL_SERVER_ERROR, HttpResponseStatus.INTERNAL_SERVER_ERROR); map.put(RestStatus.NOT_IMPLEMENTED, HttpResponseStatus.NOT_IMPLEMENTED); map.put(RestStatus.BAD_GATEWAY, HttpResponseStatus.BAD_GATEWAY); @@ -279,5 +280,4 @@ private FullHttpResponse newResponse(ByteBuf buffer) { private static HttpResponseStatus getStatus(RestStatus status) { return MAP.getOrDefault(status, HttpResponseStatus.INTERNAL_SERVER_ERROR); } - } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java index 1fd18b2a016d7..74429c8dda9b7 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpRequestHandler.java @@ -29,6 +29,7 @@ import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.HttpHeaders; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.http.HttpHandlingSettings; import org.elasticsearch.http.netty4.pipelining.HttpPipelinedRequest; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.transport.netty4.Netty4Utils; @@ -39,14 +40,15 @@ class Netty4HttpRequestHandler extends SimpleChannelInboundHandler<Object> { private final 
Netty4HttpServerTransport serverTransport; + private final HttpHandlingSettings handlingSettings; private final boolean httpPipeliningEnabled; - private final boolean detailedErrorsEnabled; private final ThreadContext threadContext; - Netty4HttpRequestHandler(Netty4HttpServerTransport serverTransport, boolean detailedErrorsEnabled, ThreadContext threadContext) { + Netty4HttpRequestHandler(Netty4HttpServerTransport serverTransport, HttpHandlingSettings handlingSettings, + ThreadContext threadContext) { this.serverTransport = serverTransport; this.httpPipeliningEnabled = serverTransport.pipelining; - this.detailedErrorsEnabled = detailedErrorsEnabled; + this.handlingSettings = handlingSettings; this.threadContext = threadContext; } @@ -109,7 +111,7 @@ protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Except Netty4HttpChannel innerChannel; try { innerChannel = - new Netty4HttpChannel(serverTransport, httpRequest, pipelinedRequest, detailedErrorsEnabled, threadContext); + new Netty4HttpChannel(serverTransport, httpRequest, pipelinedRequest, handlingSettings, threadContext); } catch (final IllegalArgumentException e) { if (badRequestCause == null) { badRequestCause = e; @@ -124,7 +126,7 @@ protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Except copy, ctx.channel()); innerChannel = - new Netty4HttpChannel(serverTransport, innerRequest, pipelinedRequest, detailedErrorsEnabled, threadContext); + new Netty4HttpChannel(serverTransport, innerRequest, pipelinedRequest, handlingSettings, threadContext); } channel = innerChannel; } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index c8c2c4829d2cf..8e5bace46aa7e 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -19,8 +19,6 @@ package org.elasticsearch.http.netty4; -import com.carrotsearch.hppc.IntHashSet; -import com.carrotsearch.hppc.IntSet; import io.netty.bootstrap.ServerBootstrap; import io.netty.channel.Channel; import io.netty.channel.ChannelFuture; @@ -44,15 +42,12 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.transport.BoundTransportAddress; import org.elasticsearch.common.transport.NetworkExceptionHelper; -import org.elasticsearch.common.transport.PortsRange; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; @@ -62,18 +57,14 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.http.BindHttpException; -import org.elasticsearch.http.HttpInfo; -import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.http.HttpHandlingSettings; import org.elasticsearch.http.HttpStats; 
import org.elasticsearch.http.netty4.cors.Netty4CorsConfig; import org.elasticsearch.http.netty4.cors.Netty4CorsConfigBuilder; import org.elasticsearch.http.netty4.cors.Netty4CorsHandler; import org.elasticsearch.http.netty4.pipelining.HttpPipeliningHandler; -import org.elasticsearch.rest.RestChannel; -import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestUtils; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.BindTransportException; import org.elasticsearch.transport.netty4.Netty4OpenChannelsHandler; import org.elasticsearch.transport.netty4.Netty4Utils; @@ -94,7 +85,6 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_MAX_AGE; -import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_BIND_HOST; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED; @@ -102,9 +92,6 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH; -import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_PORT; -import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_PUBLISH_HOST; -import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_READ_TIMEOUT; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_RESET_COOKIES; import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_KEEP_ALIVE; @@ -116,7 +103,7 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS; import static org.elasticsearch.http.netty4.cors.Netty4CorsHandler.ANY_ORIGIN; -public class Netty4HttpServerTransport extends AbstractLifecycleComponent implements HttpServerTransport { +public class Netty4HttpServerTransport extends AbstractHttpServerTransport { static { Netty4Utils.setup(); @@ -167,11 +154,8 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem public static final Setting SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE = Setting.byteSizeSetting("http.netty.receive_predictor_size", new ByteSizeValue(64, ByteSizeUnit.KB), Property.NodeScope); - - protected final NetworkService networkService; protected final BigArrays bigArrays; - protected final ByteSizeValue maxContentLength; protected final ByteSizeValue maxInitialLineLength; protected final ByteSizeValue maxHeaderSize; protected final ByteSizeValue maxChunkSize; @@ -182,20 +166,6 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem protected final int pipeliningMaxEvents; - protected final boolean compression; - - protected final int compressionLevel; - - protected final boolean resetCookies; - - protected final PortsRange port; - - protected final String bindHosts[]; - - protected final String publishHosts[]; - - protected final boolean detailedErrorsEnabled; - protected final ThreadPool threadPool; /** * The registry used to construct 
parsers so they support {@link XContentParser#namedObject(Class, String, Object)}. */ @@ -211,14 +181,13 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem private final int readTimeoutMillis; protected final int maxCompositeBufferComponents; - private final Dispatcher dispatcher; protected volatile ServerBootstrap serverBootstrap; - protected volatile BoundTransportAddress boundAddress; - protected final List serverChannels = new ArrayList<>(); + protected final HttpHandlingSettings httpHandlingSettings; + // package private for testing Netty4OpenChannelsHandler serverOpenChannels; @@ -227,49 +196,40 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem public Netty4HttpServerTransport(Settings settings, NetworkService networkService, BigArrays bigArrays, ThreadPool threadPool, NamedXContentRegistry xContentRegistry, Dispatcher dispatcher) { - super(settings); + super(settings, networkService, threadPool, dispatcher); Netty4Utils.setAvailableProcessors(EsExecutors.PROCESSORS_SETTING.get(settings)); - this.networkService = networkService; this.bigArrays = bigArrays; - this.threadPool = threadPool; this.xContentRegistry = xContentRegistry; - this.dispatcher = dispatcher; - ByteSizeValue maxContentLength = SETTING_HTTP_MAX_CONTENT_LENGTH.get(settings); this.maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings); this.maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings); this.maxInitialLineLength = SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings); - this.resetCookies = SETTING_HTTP_RESET_COOKIES.get(settings); + this.httpHandlingSettings = new HttpHandlingSettings(Math.toIntExact(maxContentLength.getBytes()), + Math.toIntExact(maxChunkSize.getBytes()), + Math.toIntExact(maxHeaderSize.getBytes()), + Math.toIntExact(maxInitialLineLength.getBytes()), + SETTING_HTTP_RESET_COOKIES.get(settings), + SETTING_HTTP_COMPRESSION.get(settings), + SETTING_HTTP_COMPRESSION_LEVEL.get(settings), + SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings)); + this.maxCompositeBufferComponents = SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS.get(settings); this.workerCount = SETTING_HTTP_WORKER_COUNT.get(settings); - this.port = SETTING_HTTP_PORT.get(settings); - // we can't make the network.bind_host a fallback since we already fall back to http.host hence the extra conditional here - List httpBindHost = SETTING_HTTP_BIND_HOST.get(settings); - this.bindHosts = (httpBindHost.isEmpty() ? NetworkService.GLOBAL_NETWORK_BINDHOST_SETTING.get(settings) : httpBindHost) - .toArray(Strings.EMPTY_ARRAY); - // we can't make the network.publish_host a fallback since we already fall back to http.host hence the extra conditional here - List httpPublishHost = SETTING_HTTP_PUBLISH_HOST.get(settings); - this.publishHosts = (httpPublishHost.isEmpty() ? 
NetworkService.GLOBAL_NETWORK_PUBLISHHOST_SETTING.get(settings) : httpPublishHost) - .toArray(Strings.EMPTY_ARRAY); + this.tcpNoDelay = SETTING_HTTP_TCP_NO_DELAY.get(settings); this.tcpKeepAlive = SETTING_HTTP_TCP_KEEP_ALIVE.get(settings); this.reuseAddress = SETTING_HTTP_TCP_REUSE_ADDRESS.get(settings); this.tcpSendBufferSize = SETTING_HTTP_TCP_SEND_BUFFER_SIZE.get(settings); this.tcpReceiveBufferSize = SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE.get(settings); - this.detailedErrorsEnabled = SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings); this.readTimeoutMillis = Math.toIntExact(SETTING_HTTP_READ_TIMEOUT.get(settings).getMillis()); ByteSizeValue receivePredictor = SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE.get(settings); recvByteBufAllocator = new FixedRecvByteBufAllocator(receivePredictor.bytesAsInt()); - this.compression = SETTING_HTTP_COMPRESSION.get(settings); - this.compressionLevel = SETTING_HTTP_COMPRESSION_LEVEL.get(settings); this.pipelining = SETTING_PIPELINING.get(settings); this.pipeliningMaxEvents = SETTING_PIPELINING_MAX_EVENTS.get(settings); this.corsConfig = buildCorsConfig(settings); - this.maxContentLength = maxContentLength; - logger.debug("using max_chunk_size[{}], max_header_size[{}], max_initial_line_length[{}], max_content_length[{}], " + "receive_predictor[{}], max_composite_buffer_components[{}], pipelining[{}], pipelining_max_events[{}]", maxChunkSize, maxHeaderSize, maxInitialLineLength, this.maxContentLength, receivePredictor, maxCompositeBufferComponents, @@ -326,65 +286,6 @@ protected void doStart() { } } - private BoundTransportAddress createBoundHttpAddress() { - // Bind and start to accept incoming connections. - InetAddress hostAddresses[]; - try { - hostAddresses = networkService.resolveBindHostAddresses(bindHosts); - } catch (IOException e) { - throw new BindHttpException("Failed to resolve host [" + Arrays.toString(bindHosts) + "]", e); - } - - List boundAddresses = new ArrayList<>(hostAddresses.length); - for (InetAddress address : hostAddresses) { - boundAddresses.add(bindAddress(address)); - } - - final InetAddress publishInetAddress; - try { - publishInetAddress = networkService.resolvePublishHostAddresses(publishHosts); - } catch (Exception e) { - throw new BindTransportException("Failed to resolve publish address", e); - } - - final int publishPort = resolvePublishPort(settings, boundAddresses, publishInetAddress); - final InetSocketAddress publishAddress = new InetSocketAddress(publishInetAddress, publishPort); - return new BoundTransportAddress(boundAddresses.toArray(new TransportAddress[0]), new TransportAddress(publishAddress)); - } - - // package private for tests - static int resolvePublishPort(Settings settings, List boundAddresses, InetAddress publishInetAddress) { - int publishPort = SETTING_HTTP_PUBLISH_PORT.get(settings); - - if (publishPort < 0) { - for (TransportAddress boundAddress : boundAddresses) { - InetAddress boundInetAddress = boundAddress.address().getAddress(); - if (boundInetAddress.isAnyLocalAddress() || boundInetAddress.equals(publishInetAddress)) { - publishPort = boundAddress.getPort(); - break; - } - } - } - - // if no matching boundAddress found, check if there is a unique port for all bound addresses - if (publishPort < 0) { - final IntSet ports = new IntHashSet(); - for (TransportAddress boundAddress : boundAddresses) { - ports.add(boundAddress.getPort()); - } - if (ports.size() == 1) { - publishPort = ports.iterator().next().value; - } - } - - if (publishPort < 0) { - throw new BindHttpException("Failed to 
auto-resolve http publish port, multiple bound addresses " + boundAddresses + - " with distinct ports and none of them matched the publish address (" + publishInetAddress + "). " + - "Please specify a unique port by setting " + SETTING_HTTP_PORT.getKey() + " or " + SETTING_HTTP_PUBLISH_PORT.getKey()); - } - return publishPort; - } - // package private for testing static Netty4CorsConfig buildCorsConfig(Settings settings) { if (SETTING_CORS_ENABLED.get(settings) == false) { @@ -419,7 +320,8 @@ static Netty4CorsConfig buildCorsConfig(Settings settings) { .build(); } - private TransportAddress bindAddress(final InetAddress hostAddress) { + @Override + protected TransportAddress bindAddress(final InetAddress hostAddress) { final AtomicReference lastException = new AtomicReference<>(); final AtomicReference boundSocket = new AtomicReference<>(); boolean success = port.iterate(portNumber -> { @@ -473,20 +375,6 @@ protected void doStop() { protected void doClose() { } - @Override - public BoundTransportAddress boundAddress() { - return this.boundAddress; - } - - @Override - public HttpInfo info() { - BoundTransportAddress boundTransportAddress = boundAddress(); - if (boundTransportAddress == null) { - return null; - } - return new HttpInfo(boundTransportAddress, maxContentLength.getBytes()); - } - @Override public HttpStats stats() { Netty4OpenChannelsHandler channels = serverOpenChannels; @@ -497,20 +385,6 @@ public Netty4CorsConfig getCorsConfig() { return corsConfig; } - void dispatchRequest(final RestRequest request, final RestChannel channel) { - final ThreadContext threadContext = threadPool.getThreadContext(); - try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - dispatcher.dispatchRequest(request, channel, threadContext); - } - } - - void dispatchBadRequest(final RestRequest request, final RestChannel channel, final Throwable cause) { - final ThreadContext threadContext = threadPool.getThreadContext(); - try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { - dispatcher.dispatchBadRequest(request, channel, threadContext, cause); - } - } - protected void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { if (cause instanceof ReadTimeoutException) { if (logger.isTraceEnabled()) { @@ -539,20 +413,22 @@ protected void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throw } public ChannelHandler configureServerChannelHandler() { - return new HttpChannelHandler(this, detailedErrorsEnabled, threadPool.getThreadContext()); + return new HttpChannelHandler(this, httpHandlingSettings, threadPool.getThreadContext()); } protected static class HttpChannelHandler extends ChannelInitializer { private final Netty4HttpServerTransport transport; private final Netty4HttpRequestHandler requestHandler; + private final HttpHandlingSettings handlingSettings; protected HttpChannelHandler( final Netty4HttpServerTransport transport, - final boolean detailedErrorsEnabled, + final HttpHandlingSettings handlingSettings, final ThreadContext threadContext) { this.transport = transport; - this.requestHandler = new Netty4HttpRequestHandler(transport, detailedErrorsEnabled, threadContext); + this.handlingSettings = handlingSettings; + this.requestHandler = new Netty4HttpRequestHandler(transport, handlingSettings, threadContext); } @Override @@ -560,18 +436,18 @@ protected void initChannel(Channel ch) throws Exception { ch.pipeline().addLast("openChannels", transport.serverOpenChannels); ch.pipeline().addLast("read_timeout", new 
ReadTimeoutHandler(transport.readTimeoutMillis, TimeUnit.MILLISECONDS)); final HttpRequestDecoder decoder = new HttpRequestDecoder( - Math.toIntExact(transport.maxInitialLineLength.getBytes()), - Math.toIntExact(transport.maxHeaderSize.getBytes()), - Math.toIntExact(transport.maxChunkSize.getBytes())); + handlingSettings.getMaxInitialLineLength(), + handlingSettings.getMaxHeaderSize(), + handlingSettings.getMaxChunkSize()); decoder.setCumulator(ByteToMessageDecoder.COMPOSITE_CUMULATOR); ch.pipeline().addLast("decoder", decoder); ch.pipeline().addLast("decoder_compress", new HttpContentDecompressor()); ch.pipeline().addLast("encoder", new HttpResponseEncoder()); - final HttpObjectAggregator aggregator = new HttpObjectAggregator(Math.toIntExact(transport.maxContentLength.getBytes())); + final HttpObjectAggregator aggregator = new HttpObjectAggregator(handlingSettings.getMaxContentLength()); aggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents); ch.pipeline().addLast("aggregator", aggregator); - if (transport.compression) { - ch.pipeline().addLast("encoder_compress", new HttpContentCompressor(transport.compressionLevel)); + if (handlingSettings.isCompression()) { + ch.pipeline().addLast("encoder_compress", new HttpContentCompressor(handlingSettings.getCompressionLevel())); } if (SETTING_CORS_ENABLED.get(transport.settings())) { ch.pipeline().addLast("cors", new Netty4CorsHandler(transport.getCorsConfig())); @@ -587,7 +463,6 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws E Netty4Utils.maybeDie(cause); super.exceptionCaught(ctx, cause); } - } } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java index 918e98fd2e7c0..0ef1ea585b11c 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpChannelTests.java @@ -56,6 +56,7 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.json.JsonXContent; +import org.elasticsearch.http.HttpHandlingSettings; import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.http.NullDispatcher; import org.elasticsearch.http.netty4.cors.Netty4CorsHandler; @@ -212,10 +213,11 @@ public void testHeadersSet() { httpRequest.headers().add(HttpHeaderNames.ORIGIN, "remote"); final WriteCapturingChannel writeCapturingChannel = new WriteCapturingChannel(); Netty4HttpRequest request = new Netty4HttpRequest(xContentRegistry(), httpRequest, writeCapturingChannel); + HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings; // send a response Netty4HttpChannel channel = - new Netty4HttpChannel(httpServerTransport, request, null, randomBoolean(), threadPool.getThreadContext()); + new Netty4HttpChannel(httpServerTransport, request, null, handlingSettings, threadPool.getThreadContext()); TestResponse resp = new TestResponse(); final String customHeader = "custom-header"; final String customHeaderValue = "xyz"; @@ -242,8 +244,9 @@ public void testReleaseOnSendToClosedChannel() { final EmbeddedChannel embeddedChannel = new EmbeddedChannel(); final Netty4HttpRequest request = new Netty4HttpRequest(registry, httpRequest, embeddedChannel); final HttpPipelinedRequest pipelinedRequest = 
randomBoolean() ? new HttpPipelinedRequest(request.request(), 1) : null; + HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings; final Netty4HttpChannel channel = - new Netty4HttpChannel(httpServerTransport, request, pipelinedRequest, randomBoolean(), threadPool.getThreadContext()); + new Netty4HttpChannel(httpServerTransport, request, pipelinedRequest, handlingSettings, threadPool.getThreadContext()); final TestResponse response = new TestResponse(bigArrays); assertThat(response.content(), instanceOf(Releasable.class)); embeddedChannel.close(); @@ -261,8 +264,9 @@ public void testReleaseOnSendToChannelAfterException() throws IOException { final EmbeddedChannel embeddedChannel = new EmbeddedChannel(); final Netty4HttpRequest request = new Netty4HttpRequest(registry, httpRequest, embeddedChannel); final HttpPipelinedRequest pipelinedRequest = randomBoolean() ? new HttpPipelinedRequest(request.request(), 1) : null; + HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings; final Netty4HttpChannel channel = - new Netty4HttpChannel(httpServerTransport, request, pipelinedRequest, randomBoolean(), threadPool.getThreadContext()); + new Netty4HttpChannel(httpServerTransport, request, pipelinedRequest, handlingSettings, threadPool.getThreadContext()); final BytesRestResponse response = new BytesRestResponse(RestStatus.INTERNAL_SERVER_ERROR, JsonXContent.contentBuilder().startObject().endObject()); assertThat(response.content(), not(instanceOf(Releasable.class))); @@ -306,8 +310,9 @@ public void testConnectionClose() throws Exception { // send a response, the channel close status should match assertTrue(embeddedChannel.isOpen()); + HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings; final Netty4HttpChannel channel = - new Netty4HttpChannel(httpServerTransport, request, null, randomBoolean(), threadPool.getThreadContext()); + new Netty4HttpChannel(httpServerTransport, request, null, handlingSettings, threadPool.getThreadContext()); final TestResponse resp = new TestResponse(); channel.sendResponse(resp); assertThat(embeddedChannel.isOpen(), equalTo(!close)); @@ -332,9 +337,10 @@ private FullHttpResponse executeRequest(final Settings settings, final String or final WriteCapturingChannel writeCapturingChannel = new WriteCapturingChannel(); final Netty4HttpRequest request = new Netty4HttpRequest(xContentRegistry(), httpRequest, writeCapturingChannel); + HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings; Netty4HttpChannel channel = - new Netty4HttpChannel(httpServerTransport, request, null, randomBoolean(), threadPool.getThreadContext()); + new Netty4HttpChannel(httpServerTransport, request, null, handlingSettings, threadPool.getThreadContext()); channel.sendResponse(new TestResponse()); // get the response diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java index 91a5465f6a764..0eb14a8a76e9b 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpServerPipeliningTests.java @@ -184,7 +184,7 @@ private class CustomHttpChannelHandler extends Netty4HttpServerTransport.HttpCha private final ExecutorService executorService; CustomHttpChannelHandler(Netty4HttpServerTransport 
transport, ExecutorService executorService, ThreadContext threadContext) { - super(transport, randomBoolean(), threadContext); + super(transport, transport.httpHandlingSettings, threadContext); this.executorService = executorService; } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java index 028770ed22469..bc89558d3c6dc 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/rest/Netty4BadRequestIT.java @@ -99,5 +99,4 @@ public void testInvalidHeaderValue() throws IOException { assertThat(map.get("type"), equalTo("content_type_header_exception")); assertThat(map.get("reason"), equalTo("java.lang.IllegalArgumentException: invalid Content-Type header []")); } - } diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index e164a8553f81f..07ef4b4be5e62 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -22,38 +22,207 @@ esplugin { classname 'org.elasticsearch.repositories.gcs.GoogleCloudStoragePlugin' } -versions << [ - 'google': '1.23.0', -] - dependencies { - compile "com.google.apis:google-api-services-storage:v1-rev115-${versions.google}" - compile "com.google.api-client:google-api-client:${versions.google}" - compile "com.google.oauth-client:google-oauth-client:${versions.google}" - compile "org.apache.httpcomponents:httpclient:${versions.httpclient}" - compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" - compile "commons-logging:commons-logging:${versions.commonslogging}" - compile "commons-codec:commons-codec:${versions.commonscodec}" - compile "com.google.http-client:google-http-client:${versions.google}" - compile "com.google.http-client:google-http-client-jackson2:${versions.google}" + compile 'com.google.cloud:google-cloud-storage:1.28.0' + compile 'com.google.cloud:google-cloud-core:1.28.0' + compile 'com.google.cloud:google-cloud-core-http:1.28.0' + compile 'com.google.auth:google-auth-library-oauth2-http:0.9.1' + compile 'com.google.auth:google-auth-library-credentials:0.9.1' + compile 'com.google.oauth-client:google-oauth-client:1.23.0' + compile 'com.google.http-client:google-http-client:1.23.0' + compile 'com.google.http-client:google-http-client-jackson:1.23.0' + compile 'com.google.http-client:google-http-client-jackson2:1.23.0' + compile 'com.google.http-client:google-http-client-appengine:1.23.0' + compile 'com.google.api-client:google-api-client:1.23.0' + compile 'com.google.api:gax:1.25.0' + compile 'com.google.api:gax-httpjson:0.40.0' + compile 'com.google.api:api-common:1.5.0' + compile 'com.google.api.grpc:proto-google-common-protos:1.8.0' + compile 'com.google.guava:guava:20.0' + compile 'com.google.apis:google-api-services-storage:v1-rev115-1.23.0' + compile 'org.codehaus.jackson:jackson-core-asl:1.9.13' + compile 'io.grpc:grpc-context:1.9.0' + compile 'io.opencensus:opencensus-api:0.11.1' + compile 'io.opencensus:opencensus-contrib-http-util:0.11.1' + compile 'org.threeten:threetenbp:1.3.6' } dependencyLicenses { - mapping from: /google-.*/, to: 'google' + mapping from: /google-cloud-.*/, to: 'google-cloud' + mapping from: /google-auth-.*/, to: 'google-auth' + mapping from: /google-http-.*/, to: 'google-http' + mapping from: /opencensus.*/, to: 'opencensus' } thirdPartyAudit.excludes = [ + // uses internal java api: sun.misc.Unsafe + 
'com.google.common.cache.Striped64', + 'com.google.common.cache.Striped64$1', + 'com.google.common.cache.Striped64$Cell', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$3', + 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper', + 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray', + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator', + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1', // classes are missing - 'com.google.common.base.Splitter', - 'com.google.common.collect.Lists', - 'javax.servlet.ServletContextEvent', - 'javax.servlet.ServletContextListener', - 'org.apache.avalon.framework.logger.Logger', - 'org.apache.log.Hierarchy', - 'org.apache.log.Logger', + 'com.google.appengine.api.datastore.Blob', + 'com.google.appengine.api.datastore.DatastoreService', + 'com.google.appengine.api.datastore.DatastoreServiceFactory', + 'com.google.appengine.api.datastore.Entity', + 'com.google.appengine.api.datastore.Key', + 'com.google.appengine.api.datastore.KeyFactory', + 'com.google.appengine.api.datastore.PreparedQuery', + 'com.google.appengine.api.datastore.Query', + 'com.google.appengine.api.memcache.Expiration', + 'com.google.appengine.api.memcache.MemcacheService', + 'com.google.appengine.api.memcache.MemcacheServiceFactory', + 'com.google.appengine.api.urlfetch.FetchOptions$Builder', + 'com.google.appengine.api.urlfetch.FetchOptions', + 'com.google.appengine.api.urlfetch.HTTPHeader', + 'com.google.appengine.api.urlfetch.HTTPMethod', + 'com.google.appengine.api.urlfetch.HTTPRequest', + 'com.google.appengine.api.urlfetch.HTTPResponse', + 'com.google.appengine.api.urlfetch.URLFetchService', + 'com.google.appengine.api.urlfetch.URLFetchServiceFactory', + 'com.google.gson.Gson', + 'com.google.gson.GsonBuilder', + 'com.google.gson.TypeAdapter', + 'com.google.gson.stream.JsonReader', + 'com.google.gson.stream.JsonWriter', + 'com.google.iam.v1.Binding$Builder', + 'com.google.iam.v1.Binding', + 'com.google.iam.v1.Policy$Builder', + 'com.google.iam.v1.Policy', + 'com.google.protobuf.AbstractMessageLite$Builder', + 'com.google.protobuf.AbstractParser', + 'com.google.protobuf.Any$Builder', + 'com.google.protobuf.Any', + 'com.google.protobuf.AnyOrBuilder', + 'com.google.protobuf.AnyProto', + 'com.google.protobuf.Api$Builder', + 'com.google.protobuf.Api', + 'com.google.protobuf.ApiOrBuilder', + 'com.google.protobuf.ApiProto', + 'com.google.protobuf.ByteString', + 'com.google.protobuf.CodedInputStream', + 'com.google.protobuf.CodedOutputStream', + 'com.google.protobuf.DescriptorProtos', + 'com.google.protobuf.Descriptors$Descriptor', + 'com.google.protobuf.Descriptors$EnumDescriptor', + 'com.google.protobuf.Descriptors$EnumValueDescriptor', + 'com.google.protobuf.Descriptors$FieldDescriptor', + 'com.google.protobuf.Descriptors$FileDescriptor$InternalDescriptorAssigner', + 'com.google.protobuf.Descriptors$FileDescriptor', + 'com.google.protobuf.Descriptors$OneofDescriptor', + 'com.google.protobuf.Duration$Builder', + 'com.google.protobuf.Duration', + 'com.google.protobuf.DurationOrBuilder', + 'com.google.protobuf.DurationProto', + 'com.google.protobuf.EmptyProto', + 'com.google.protobuf.Enum$Builder', + 'com.google.protobuf.Enum', + 
'com.google.protobuf.EnumOrBuilder', + 'com.google.protobuf.ExtensionRegistry', + 'com.google.protobuf.ExtensionRegistryLite', + 'com.google.protobuf.FloatValue$Builder', + 'com.google.protobuf.FloatValue', + 'com.google.protobuf.FloatValueOrBuilder', + 'com.google.protobuf.GeneratedMessage$GeneratedExtension', + 'com.google.protobuf.GeneratedMessage', + 'com.google.protobuf.GeneratedMessageV3$Builder', + 'com.google.protobuf.GeneratedMessageV3$BuilderParent', + 'com.google.protobuf.GeneratedMessageV3$FieldAccessorTable', + 'com.google.protobuf.GeneratedMessageV3', + 'com.google.protobuf.Internal$EnumLite', + 'com.google.protobuf.Internal$EnumLiteMap', + 'com.google.protobuf.Internal', + 'com.google.protobuf.InvalidProtocolBufferException', + 'com.google.protobuf.LazyStringArrayList', + 'com.google.protobuf.LazyStringList', + 'com.google.protobuf.MapEntry$Builder', + 'com.google.protobuf.MapEntry', + 'com.google.protobuf.MapField', + 'com.google.protobuf.Message', + 'com.google.protobuf.MessageOrBuilder', + 'com.google.protobuf.Parser', + 'com.google.protobuf.ProtocolMessageEnum', + 'com.google.protobuf.ProtocolStringList', + 'com.google.protobuf.RepeatedFieldBuilderV3', + 'com.google.protobuf.SingleFieldBuilderV3', + 'com.google.protobuf.Struct$Builder', + 'com.google.protobuf.Struct', + 'com.google.protobuf.StructOrBuilder', + 'com.google.protobuf.StructProto', + 'com.google.protobuf.Timestamp$Builder', + 'com.google.protobuf.Timestamp', + 'com.google.protobuf.TimestampProto', + 'com.google.protobuf.Type$Builder', + 'com.google.protobuf.Type', + 'com.google.protobuf.TypeOrBuilder', + 'com.google.protobuf.TypeProto', + 'com.google.protobuf.UInt32Value$Builder', + 'com.google.protobuf.UInt32Value', + 'com.google.protobuf.UInt32ValueOrBuilder', + 'com.google.protobuf.UnknownFieldSet$Builder', + 'com.google.protobuf.UnknownFieldSet', + 'com.google.protobuf.WireFormat$FieldType', + 'com.google.protobuf.WrappersProto', + 'com.google.protobuf.util.Timestamps', + 'org.apache.http.ConnectionReuseStrategy', + 'org.apache.http.Header', + 'org.apache.http.HttpEntity', + 'org.apache.http.HttpEntityEnclosingRequest', + 'org.apache.http.HttpHost', + 'org.apache.http.HttpRequest', + 'org.apache.http.HttpResponse', + 'org.apache.http.HttpVersion', + 'org.apache.http.RequestLine', + 'org.apache.http.StatusLine', + 'org.apache.http.client.AuthenticationHandler', + 'org.apache.http.client.HttpClient', + 'org.apache.http.client.HttpRequestRetryHandler', + 'org.apache.http.client.RedirectHandler', + 'org.apache.http.client.RequestDirector', + 'org.apache.http.client.UserTokenHandler', + 'org.apache.http.client.methods.HttpDelete', + 'org.apache.http.client.methods.HttpEntityEnclosingRequestBase', + 'org.apache.http.client.methods.HttpGet', + 'org.apache.http.client.methods.HttpHead', + 'org.apache.http.client.methods.HttpOptions', + 'org.apache.http.client.methods.HttpPost', + 'org.apache.http.client.methods.HttpPut', + 'org.apache.http.client.methods.HttpRequestBase', + 'org.apache.http.client.methods.HttpTrace', + 'org.apache.http.conn.ClientConnectionManager', + 'org.apache.http.conn.ConnectionKeepAliveStrategy', + 'org.apache.http.conn.params.ConnManagerParams', + 'org.apache.http.conn.params.ConnPerRouteBean', + 'org.apache.http.conn.params.ConnRouteParams', + 'org.apache.http.conn.routing.HttpRoutePlanner', + 'org.apache.http.conn.scheme.PlainSocketFactory', + 'org.apache.http.conn.scheme.Scheme', + 'org.apache.http.conn.scheme.SchemeRegistry', + 'org.apache.http.conn.ssl.SSLSocketFactory', + 
'org.apache.http.conn.ssl.X509HostnameVerifier', + 'org.apache.http.entity.AbstractHttpEntity', + 'org.apache.http.impl.client.DefaultHttpClient', + 'org.apache.http.impl.client.DefaultHttpRequestRetryHandler', + 'org.apache.http.impl.conn.ProxySelectorRoutePlanner', + 'org.apache.http.impl.conn.tsccm.ThreadSafeClientConnManager', + 'org.apache.http.message.BasicHttpResponse', + 'org.apache.http.params.BasicHttpParams', + 'org.apache.http.params.HttpConnectionParams', + 'org.apache.http.params.HttpParams', + 'org.apache.http.params.HttpProtocolParams', + 'org.apache.http.protocol.HttpContext', + 'org.apache.http.protocol.HttpProcessor', + 'org.apache.http.protocol.HttpRequestExecutor' ] check { // also execute the QA tests when testing the plugin dependsOn 'qa:google-cloud-storage:check' -} \ No newline at end of file +} diff --git a/plugins/repository-gcs/licenses/api-common-1.5.0.jar.sha1 b/plugins/repository-gcs/licenses/api-common-1.5.0.jar.sha1 new file mode 100644 index 0000000000000..64435356e5eaf --- /dev/null +++ b/plugins/repository-gcs/licenses/api-common-1.5.0.jar.sha1 @@ -0,0 +1 @@ +7e537338d40a57ad469239acb6d828fa544fb52b \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/api-common-LICENSE.txt b/plugins/repository-gcs/licenses/api-common-LICENSE.txt new file mode 100644 index 0000000000000..6d16b6578a2f0 --- /dev/null +++ b/plugins/repository-gcs/licenses/api-common-LICENSE.txt @@ -0,0 +1,27 @@ +Copyright 2016, Google Inc. +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/plugins/repository-gcs/licenses/api-common-NOTICE.txt b/plugins/repository-gcs/licenses/api-common-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-gcs/licenses/commons-codec-1.10.jar.sha1 b/plugins/repository-gcs/licenses/commons-codec-1.10.jar.sha1 deleted file mode 100644 index 3fe8682a1b0f9..0000000000000 --- a/plugins/repository-gcs/licenses/commons-codec-1.10.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4b95f4897fa13f2cd904aee711aeafc0c5295cd8 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/commons-logging-1.1.3.jar.sha1 b/plugins/repository-gcs/licenses/commons-logging-1.1.3.jar.sha1 deleted file mode 100644 index 5b8f029e58293..0000000000000 --- a/plugins/repository-gcs/licenses/commons-logging-1.1.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-1.25.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-1.25.0.jar.sha1 new file mode 100644 index 0000000000000..594177047c140 --- /dev/null +++ b/plugins/repository-gcs/licenses/gax-1.25.0.jar.sha1 @@ -0,0 +1 @@ +36ab73c0b5d4a67447eb89a3174cc76ced150bd1 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-LICENSE.txt b/plugins/repository-gcs/licenses/gax-LICENSE.txt new file mode 100644 index 0000000000000..267561bb386de --- /dev/null +++ b/plugins/repository-gcs/licenses/gax-LICENSE.txt @@ -0,0 +1,27 @@ +Copyright 2016, Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/plugins/repository-gcs/licenses/gax-NOTICE.txt b/plugins/repository-gcs/licenses/gax-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-gcs/licenses/gax-httpjson-0.40.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-httpjson-0.40.0.jar.sha1 new file mode 100644 index 0000000000000..c251ea1dd956c --- /dev/null +++ b/plugins/repository-gcs/licenses/gax-httpjson-0.40.0.jar.sha1 @@ -0,0 +1 @@ +cb4bafbfd45b9d24efbb6138a31e37918fac015f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-httpjson-LICENSE.txt b/plugins/repository-gcs/licenses/gax-httpjson-LICENSE.txt new file mode 100644 index 0000000000000..267561bb386de --- /dev/null +++ b/plugins/repository-gcs/licenses/gax-httpjson-LICENSE.txt @@ -0,0 +1,27 @@ +Copyright 2016, Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/plugins/repository-gcs/licenses/gax-httpjson-NOTICE.txt b/plugins/repository-gcs/licenses/gax-httpjson-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-gcs/licenses/google-api-client-LICENSE.txt b/plugins/repository-gcs/licenses/google-api-client-LICENSE.txt new file mode 100644 index 0000000000000..4eedc0116add7 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-api-client-LICENSE.txt @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/repository-gcs/licenses/google-api-client-NOTICE.txt b/plugins/repository-gcs/licenses/google-api-client-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev115-LICENSE.txt b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev115-LICENSE.txt new file mode 100644 index 0000000000000..4eedc0116add7 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev115-LICENSE.txt @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev115-NOTICE.txt b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev115-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-gcs/licenses/google-auth-LICENSE.txt b/plugins/repository-gcs/licenses/google-auth-LICENSE.txt new file mode 100644 index 0000000000000..12edf23c6711f --- /dev/null +++ b/plugins/repository-gcs/licenses/google-auth-LICENSE.txt @@ -0,0 +1,28 @@ +Copyright 2014, Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/plugins/repository-gcs/licenses/google-auth-NOTICE.txt b/plugins/repository-gcs/licenses/google-auth-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-gcs/licenses/google-auth-library-credentials-0.9.1.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-credentials-0.9.1.jar.sha1 new file mode 100644 index 0000000000000..0922a53d2e356 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-auth-library-credentials-0.9.1.jar.sha1 @@ -0,0 +1 @@ +25e0f45f3b3d1b4fccc8944845e51a7a4f359652 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.9.1.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.9.1.jar.sha1 new file mode 100644 index 0000000000000..100a44c187218 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.9.1.jar.sha1 @@ -0,0 +1 @@ +c0fe3a39b0f28d59de1986b3c50f018cd7cb9ec2 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-LICENSE.txt b/plugins/repository-gcs/licenses/google-cloud-LICENSE.txt new file mode 100644 index 0000000000000..4eedc0116add7 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-cloud-LICENSE.txt @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/repository-gcs/licenses/google-cloud-NOTICE.txt b/plugins/repository-gcs/licenses/google-cloud-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-gcs/licenses/google-cloud-core-1.28.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-1.28.0.jar.sha1 new file mode 100644 index 0000000000000..071533f227839 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-cloud-core-1.28.0.jar.sha1 @@ -0,0 +1 @@ +c0e88c78ce17c92d76bf46345faf3fa68833b216 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-http-1.28.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-http-1.28.0.jar.sha1 new file mode 100644 index 0000000000000..fed3fc257c32c --- /dev/null +++ b/plugins/repository-gcs/licenses/google-cloud-core-http-1.28.0.jar.sha1 @@ -0,0 +1 @@ +7b4559a9513abd98da50958c56a10f8ae00cb0f7 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-storage-1.28.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-storage-1.28.0.jar.sha1 new file mode 100644 index 0000000000000..f49152ea05646 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-cloud-storage-1.28.0.jar.sha1 @@ -0,0 +1 @@ +226019ae816b42c59f1b06999aeeb73722b87200 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-LICENSE.txt b/plugins/repository-gcs/licenses/google-http-LICENSE.txt similarity index 100% rename from plugins/repository-gcs/licenses/google-LICENSE.txt rename to plugins/repository-gcs/licenses/google-http-LICENSE.txt diff --git a/plugins/repository-gcs/licenses/google-http-NOTICE.txt b/plugins/repository-gcs/licenses/google-http-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-gcs/licenses/google-http-client-appengine-1.23.0.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-appengine-1.23.0.jar.sha1 new file mode 100644 index 0000000000000..823c3a85089a5 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-appengine-1.23.0.jar.sha1 @@ -0,0 +1 @@ +0eda0d0f758c1cc525866e52e1226c4eb579d130 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson-1.23.0.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson-1.23.0.jar.sha1 new file mode 100644 index 0000000000000..85ba0ab798d05 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-jackson-1.23.0.jar.sha1 @@ -0,0 +1 @@ +a72ea3a197937ef63a893e73df312dac0d813663 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-oauth-client-LICENSE.txt b/plugins/repository-gcs/licenses/google-oauth-client-LICENSE.txt new file mode 100644 index 0000000000000..12edf23c6711f --- /dev/null +++ b/plugins/repository-gcs/licenses/google-oauth-client-LICENSE.txt @@ -0,0 +1,28 @@ +Copyright 2014, Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/plugins/repository-gcs/licenses/google-oauth-client-NOTICE.txt b/plugins/repository-gcs/licenses/google-oauth-client-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-gcs/licenses/grpc-context-1.9.0.jar.sha1 b/plugins/repository-gcs/licenses/grpc-context-1.9.0.jar.sha1 new file mode 100644 index 0000000000000..02bac0e492074 --- /dev/null +++ b/plugins/repository-gcs/licenses/grpc-context-1.9.0.jar.sha1 @@ -0,0 +1 @@ +28b0836f48c9705abf73829bbc536dba29a1329a \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/commons-codec-LICENSE.txt b/plugins/repository-gcs/licenses/grpc-context-LICENSE.txt similarity index 100% rename from plugins/repository-gcs/licenses/commons-codec-LICENSE.txt rename to plugins/repository-gcs/licenses/grpc-context-LICENSE.txt diff --git a/plugins/repository-gcs/licenses/grpc-context-NOTICE.txt b/plugins/repository-gcs/licenses/grpc-context-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-gcs/licenses/guava-20.0.jar.sha1 b/plugins/repository-gcs/licenses/guava-20.0.jar.sha1 new file mode 100644 index 0000000000000..7b6ae09060b29 --- /dev/null +++ b/plugins/repository-gcs/licenses/guava-20.0.jar.sha1 @@ -0,0 +1 @@ +89507701249388e1ed5ddcf8c41f4ce1be7831ef \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/guava-LICENSE.txt b/plugins/repository-gcs/licenses/guava-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/repository-gcs/licenses/guava-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/repository-gcs/licenses/guava-NOTICE.txt b/plugins/repository-gcs/licenses/guava-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-gcs/licenses/httpclient-4.5.2.jar.sha1 b/plugins/repository-gcs/licenses/httpclient-4.5.2.jar.sha1 deleted file mode 100644 index 6937112a09fb6..0000000000000 --- a/plugins/repository-gcs/licenses/httpclient-4.5.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -733db77aa8d9b2d68015189df76ab06304406e50 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/httpcore-4.4.5.jar.sha1 b/plugins/repository-gcs/licenses/httpcore-4.4.5.jar.sha1 deleted file mode 100644 index 581726601745b..0000000000000 --- a/plugins/repository-gcs/licenses/httpcore-4.4.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e7501a1b34325abb00d17dde96150604a0658b54 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/jackson-core-asl-1.9.13.jar.sha1 b/plugins/repository-gcs/licenses/jackson-core-asl-1.9.13.jar.sha1 new file mode 100644 index 0000000000000..c5016bf828d60 --- /dev/null +++ b/plugins/repository-gcs/licenses/jackson-core-asl-1.9.13.jar.sha1 @@ -0,0 +1 @@ +3c304d70f42f832e0a86d45bd437f692129299a4 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/jackson-core-asl-LICENSE.txt b/plugins/repository-gcs/licenses/jackson-core-asl-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/repository-gcs/licenses/jackson-core-asl-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/repository-gcs/licenses/jackson-core-asl-NOTICE.txt b/plugins/repository-gcs/licenses/jackson-core-asl-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-gcs/licenses/old/commons-codec-LICENSE.txt b/plugins/repository-gcs/licenses/old/commons-codec-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/repository-gcs/licenses/old/commons-codec-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/repository-gcs/licenses/commons-codec-NOTICE.txt b/plugins/repository-gcs/licenses/old/commons-codec-NOTICE.txt similarity index 100% rename from plugins/repository-gcs/licenses/commons-codec-NOTICE.txt rename to plugins/repository-gcs/licenses/old/commons-codec-NOTICE.txt diff --git a/plugins/repository-gcs/licenses/commons-logging-LICENSE.txt b/plugins/repository-gcs/licenses/old/commons-logging-LICENSE.txt similarity index 100% rename from plugins/repository-gcs/licenses/commons-logging-LICENSE.txt rename to plugins/repository-gcs/licenses/old/commons-logging-LICENSE.txt diff --git a/plugins/repository-gcs/licenses/commons-logging-NOTICE.txt b/plugins/repository-gcs/licenses/old/commons-logging-NOTICE.txt similarity index 100% rename from plugins/repository-gcs/licenses/commons-logging-NOTICE.txt rename to plugins/repository-gcs/licenses/old/commons-logging-NOTICE.txt diff --git a/plugins/repository-gcs/licenses/old/google-LICENSE.txt b/plugins/repository-gcs/licenses/old/google-LICENSE.txt new file mode 100644 index 0000000000000..980a15ac24eeb --- /dev/null +++ b/plugins/repository-gcs/licenses/old/google-LICENSE.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/repository-gcs/licenses/google-NOTICE.txt b/plugins/repository-gcs/licenses/old/google-NOTICE.txt similarity index 100% rename from plugins/repository-gcs/licenses/google-NOTICE.txt rename to plugins/repository-gcs/licenses/old/google-NOTICE.txt diff --git a/plugins/repository-gcs/licenses/httpclient-LICENSE.txt b/plugins/repository-gcs/licenses/old/httpclient-LICENSE.txt similarity index 100% rename from plugins/repository-gcs/licenses/httpclient-LICENSE.txt rename to plugins/repository-gcs/licenses/old/httpclient-LICENSE.txt diff --git a/plugins/repository-gcs/licenses/httpclient-NOTICE.txt b/plugins/repository-gcs/licenses/old/httpclient-NOTICE.txt similarity index 100% rename from plugins/repository-gcs/licenses/httpclient-NOTICE.txt rename to plugins/repository-gcs/licenses/old/httpclient-NOTICE.txt diff --git a/plugins/repository-gcs/licenses/httpcore-LICENSE.txt b/plugins/repository-gcs/licenses/old/httpcore-LICENSE.txt similarity index 100% rename from plugins/repository-gcs/licenses/httpcore-LICENSE.txt rename to plugins/repository-gcs/licenses/old/httpcore-LICENSE.txt diff --git a/plugins/repository-gcs/licenses/httpcore-NOTICE.txt b/plugins/repository-gcs/licenses/old/httpcore-NOTICE.txt similarity index 100% rename from plugins/repository-gcs/licenses/httpcore-NOTICE.txt rename to plugins/repository-gcs/licenses/old/httpcore-NOTICE.txt diff --git a/plugins/repository-gcs/licenses/opencensus-LICENSE.txt b/plugins/repository-gcs/licenses/opencensus-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/repository-gcs/licenses/opencensus-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/repository-gcs/licenses/opencensus-NOTICE.txt b/plugins/repository-gcs/licenses/opencensus-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-gcs/licenses/opencensus-api-0.11.1.jar.sha1 b/plugins/repository-gcs/licenses/opencensus-api-0.11.1.jar.sha1 new file mode 100644 index 0000000000000..61d8e3b148144 --- /dev/null +++ b/plugins/repository-gcs/licenses/opencensus-api-0.11.1.jar.sha1 @@ -0,0 +1 @@ +54689fbf750a7f26e34fa1f1f96b883c53f51486 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.11.1.jar.sha1 b/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.11.1.jar.sha1 new file mode 100644 index 0000000000000..c0b04f0f8ccce --- /dev/null +++ b/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.11.1.jar.sha1 @@ -0,0 +1 @@ +82e572b41e81ecf58d0d1e9a3953a05aa8f9c84b \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-1.8.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-1.8.0.jar.sha1 new file mode 100644 index 0000000000000..0a2dee4447e92 --- /dev/null +++ b/plugins/repository-gcs/licenses/proto-google-common-protos-1.8.0.jar.sha1 @@ -0,0 +1 @@ +b3282312ba82536fc9a7778cabfde149a875e877 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-LICENSE.txt b/plugins/repository-gcs/licenses/proto-google-common-protos-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/repository-gcs/licenses/proto-google-common-protos-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-NOTICE.txt b/plugins/repository-gcs/licenses/proto-google-common-protos-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-gcs/licenses/threetenbp-1.3.6.jar.sha1 b/plugins/repository-gcs/licenses/threetenbp-1.3.6.jar.sha1 new file mode 100644 index 0000000000000..65c16fed4a07b --- /dev/null +++ b/plugins/repository-gcs/licenses/threetenbp-1.3.6.jar.sha1 @@ -0,0 +1 @@ +89dcc04a7e028c3c963413a71f950703cf51f057 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/threetenbp-LICENSE.txt b/plugins/repository-gcs/licenses/threetenbp-LICENSE.txt new file mode 100644 index 0000000000000..fcdfc8f0d0774 --- /dev/null +++ b/plugins/repository-gcs/licenses/threetenbp-LICENSE.txt @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2007-present, Stephen Colebourne & Michael Nascimento Santos + * + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * * Neither the name of JSR-310 nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/plugins/repository-gcs/licenses/threetenbp-NOTICE.txt b/plugins/repository-gcs/licenses/threetenbp-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-gcs/qa/google-cloud-storage/build.gradle b/plugins/repository-gcs/qa/google-cloud-storage/build.gradle index afd49b9f4dc73..34ec92a354277 100644 --- a/plugins/repository-gcs/qa/google-cloud-storage/build.gradle +++ b/plugins/repository-gcs/qa/google-cloud-storage/build.gradle @@ -69,7 +69,6 @@ task googleCloudStorageFixture(type: AntFixture) { /** A service account file that points to the Google Cloud Storage service emulated by the fixture **/ task createServiceAccountFile() { - dependsOn googleCloudStorageFixture doLast { KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("RSA") keyPairGenerator.initialize(1024) @@ -83,11 +82,7 @@ task createServiceAccountFile() { ' "private_key_id": "' + UUID.randomUUID().toString() + '",\n' + ' "private_key": "-----BEGIN PRIVATE KEY-----\\n' + encodedKey + '\\n-----END PRIVATE KEY-----\\n",\n' + ' "client_email": "integration_test@appspot.gserviceaccount.com",\n' + - ' "client_id": "123456789101112130594",\n' + - " \"auth_uri\": \"http://${googleCloudStorageFixture.addressAndPort}/o/oauth2/auth\",\n" + - " \"token_uri\": \"http://${googleCloudStorageFixture.addressAndPort}/o/oauth2/token\",\n" + - ' "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",\n' + - ' "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/integration_test%40appspot.gserviceaccount.com"\n' + + ' "client_id": "123456789101112130594"\n' + '}', 'UTF-8') } } @@ -109,6 +104,7 @@ integTestCluster { dependsOn createServiceAccountFile, googleCloudStorageFixture /* Use a closure on the string to delay evaluation until tests are executed */ setting 'gcs.client.integration_test.endpoint', "http://${ -> googleCloudStorageFixture.addressAndPort }" + setting 'gcs.client.integration_test.token_uri', "http://${ -> 
googleCloudStorageFixture.addressAndPort }/o/oauth2/token" } else { println "Using an external service to test the repository-gcs plugin" } diff --git a/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageTestServer.java b/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageTestServer.java index 2330e230f4505..a9832ae318de4 100644 --- a/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageTestServer.java +++ b/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageTestServer.java @@ -31,13 +31,18 @@ import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; +import java.io.InputStream; import java.io.InputStreamReader; import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.zip.GZIPInputStream; import static java.util.Collections.emptyMap; import static java.util.Collections.singletonList; @@ -52,7 +57,7 @@ */ public class GoogleCloudStorageTestServer { - private static byte[] EMPTY_BYTE = new byte[0]; + private static final byte[] EMPTY_BYTE = new byte[0]; /** List of the buckets stored on this test server **/ private final Map buckets = ConcurrentCollections.newConcurrentMap(); @@ -63,13 +68,6 @@ public class GoogleCloudStorageTestServer { /** Server endpoint **/ private final String endpoint; - /** - * Creates a {@link GoogleCloudStorageTestServer} with the default endpoint - */ - GoogleCloudStorageTestServer() { - this("https://www.googleapis.com"); - } - /** * Creates a {@link GoogleCloudStorageTestServer} with a custom endpoint */ @@ -87,29 +85,6 @@ public String getEndpoint() { return endpoint; } - /** - * Returns a Google Cloud Storage response for the given request - * - * @param method the HTTP method of the request - * @param url the HTTP URL of the request - * @param headers the HTTP headers of the request - * @param body the HTTP request body - * @return a {@link Response} - * - * @throws IOException if something goes wrong - */ - public Response handle(final String method, - final String url, - final Map> headers, - byte[] body) throws IOException { - - final int questionMark = url.indexOf('?'); - if (questionMark == -1) { - return handle(method, url, null, headers, body); - } - return handle(method, url.substring(0, questionMark), url.substring(questionMark + 1), headers, body); - } - /** * Returns a Google Cloud Storage response for the given request * @@ -165,7 +140,7 @@ private static PathTrie defaultHandlers(final String endpoint, f // // https://cloud.google.com/storage/docs/json_api/v1/buckets/get handlers.insert("GET " + endpoint + "/storage/v1/b/{bucket}", (params, headers, body) -> { - String name = params.get("bucket"); + final String name = params.get("bucket"); if (Strings.hasText(name) == false) { return newError(RestStatus.INTERNAL_SERVER_ERROR, "bucket name is missing"); } @@ -181,7 +156,7 @@ private static PathTrie defaultHandlers(final String endpoint, f // // https://cloud.google.com/storage/docs/json_api/v1/objects/get handlers.insert("GET " + endpoint + "/storage/v1/b/{bucket}/o/{object}", (params, headers, body) -> { - String objectName = 
params.get("object"); + final String objectName = params.get("object"); if (Strings.hasText(objectName) == false) { return newError(RestStatus.INTERNAL_SERVER_ERROR, "object name is missing"); } @@ -191,7 +166,7 @@ private static PathTrie defaultHandlers(final String endpoint, f return newError(RestStatus.NOT_FOUND, "bucket not found"); } - for (Map.Entry object : bucket.objects.entrySet()) { + for (final Map.Entry object : bucket.objects.entrySet()) { if (object.getKey().equals(objectName)) { return newResponse(RestStatus.OK, emptyMap(), buildObjectResource(bucket.name, objectName, object.getValue())); } @@ -203,7 +178,7 @@ private static PathTrie defaultHandlers(final String endpoint, f // // https://cloud.google.com/storage/docs/json_api/v1/objects/delete handlers.insert("DELETE " + endpoint + "/storage/v1/b/{bucket}/o/{object}", (params, headers, body) -> { - String objectName = params.get("object"); + final String objectName = params.get("object"); if (Strings.hasText(objectName) == false) { return newError(RestStatus.INTERNAL_SERVER_ERROR, "object name is missing"); } @@ -224,25 +199,149 @@ private static PathTrie defaultHandlers(final String endpoint, f // // https://cloud.google.com/storage/docs/json_api/v1/objects/insert handlers.insert("POST " + endpoint + "/upload/storage/v1/b/{bucket}/o", (params, headers, body) -> { - if ("resumable".equals(params.get("uploadType")) == false) { - return newError(RestStatus.INTERNAL_SERVER_ERROR, "upload type must be resumable"); - } - - final String objectName = params.get("name"); - if (Strings.hasText(objectName) == false) { - return newError(RestStatus.INTERNAL_SERVER_ERROR, "object name is missing"); - } - - final Bucket bucket = buckets.get(params.get("bucket")); - if (bucket == null) { - return newError(RestStatus.NOT_FOUND, "bucket not found"); - } - - if (bucket.objects.put(objectName, EMPTY_BYTE) == null) { - String location = endpoint + "/upload/storage/v1/b/" + bucket.name + "/o?uploadType=resumable&upload_id=" + objectName; - return new Response(RestStatus.CREATED, singletonMap("Location", location), XContentType.JSON.mediaType(), EMPTY_BYTE); + final String uploadType = params.get("uploadType"); + if ("resumable".equals(uploadType)) { + final String objectName = params.get("name"); + if (Strings.hasText(objectName) == false) { + return newError(RestStatus.INTERNAL_SERVER_ERROR, "object name is missing"); + } + final Bucket bucket = buckets.get(params.get("bucket")); + if (bucket == null) { + return newError(RestStatus.NOT_FOUND, "bucket not found"); + } + if (bucket.objects.putIfAbsent(objectName, EMPTY_BYTE) == null) { + final String location = endpoint + "/upload/storage/v1/b/" + bucket.name + "/o?uploadType=resumable&upload_id=" + + objectName; + return new Response(RestStatus.CREATED, singletonMap("Location", location), XContentType.JSON.mediaType(), EMPTY_BYTE); + } else { + return newError(RestStatus.CONFLICT, "object already exist"); + } + } else if ("multipart".equals(uploadType)) { + /* + * A multipart/related request body looks like this (note the binary dump inside a text blob! 
nice!): + * --__END_OF_PART__ + * Content-Length: 135 + * Content-Type: application/json; charset=UTF-8 + * content-transfer-encoding: binary + * + * {"bucket":"bucket_test","crc32c":"7XacHQ==","md5Hash":"fVztGkklMlUamsSmJK7W+w==", + * "name":"tests-KEwE3bU4TuyetBgQIghmUw/master.dat-temp"} + * --__END_OF_PART__ + * content-transfer-encoding: binary + * + * KEwE3bU4TuyetBgQIghmUw + * --__END_OF_PART__-- + */ + String boundary = "__END_OF_PART__"; + // Determine the multipart boundary + final List contentTypes = headers.getOrDefault("Content-Type", headers.get("Content-type")); + if (contentTypes != null) { + final String contentType = contentTypes.get(0); + if ((contentType != null) && contentType.contains("multipart/related; boundary=")) { + boundary = contentType.replace("multipart/related; boundary=", ""); + } + } + InputStream inputStreamBody = new ByteArrayInputStream(body); + final List contentEncodings = headers.getOrDefault("Content-Encoding", headers.get("Content-encoding")); + if (contentEncodings != null) { + if (contentEncodings.stream().anyMatch(x -> "gzip".equalsIgnoreCase(x))) { + inputStreamBody = new GZIPInputStream(inputStreamBody); + } + } + // Read line by line ?both? parts of the multipart. Decoding headers as + // IS_8859_1 is safe. + try (BufferedReader reader = new BufferedReader(new InputStreamReader(inputStreamBody, StandardCharsets.ISO_8859_1))) { + String line; + // read first part delimiter + line = reader.readLine(); + if ((line == null) || (line.equals("--" + boundary) == false)) { + return newError(RestStatus.INTERNAL_SERVER_ERROR, + "Error parsing multipart request. Does not start with the part delimiter."); + } + final Map> firstPartHeaders = new HashMap<>(); + // Reads the first part's headers, if any + while ((line = reader.readLine()) != null) { + if (line.equals("\r\n") || (line.length() == 0)) { + // end of headers + break; + } else { + final String[] header = line.split(":", 2); + firstPartHeaders.put(header[0], singletonList(header[1])); + } + } + final List firstPartContentTypes = firstPartHeaders.getOrDefault("Content-Type", + firstPartHeaders.get("Content-type")); + if ((firstPartContentTypes == null) + || (firstPartContentTypes.stream().noneMatch(x -> x.contains("application/json")))) { + return newError(RestStatus.INTERNAL_SERVER_ERROR, + "Error parsing multipart request. Metadata part expected to have the \"application/json\" content type."); + } + // read metadata part, a single line + line = reader.readLine(); + final byte[] metadata = line.getBytes(StandardCharsets.ISO_8859_1); + if ((firstPartContentTypes != null) && (firstPartContentTypes.stream().anyMatch((x -> x.contains("charset=utf-8"))))) { + // decode as utf-8 + line = new String(metadata, StandardCharsets.UTF_8); + } + final Matcher objectNameMatcher = Pattern.compile("\"name\":\"([^\"]*)\"").matcher(line); + objectNameMatcher.find(); + final String objectName = objectNameMatcher.group(1); + final Matcher bucketNameMatcher = Pattern.compile("\"bucket\":\"([^\"]*)\"").matcher(line); + bucketNameMatcher.find(); + final String bucketName = bucketNameMatcher.group(1); + // read second part delimiter + line = reader.readLine(); + if ((line == null) || (line.equals("--" + boundary) == false)) { + return newError(RestStatus.INTERNAL_SERVER_ERROR, + "Error parsing multipart request. Second part does not start with delimiter. 
" + + "Is the metadata multi-line?"); + } + final Map> secondPartHeaders = new HashMap<>(); + // Reads the second part's headers, if any + while ((line = reader.readLine()) != null) { + if (line.equals("\r\n") || (line.length() == 0)) { + // end of headers + break; + } else { + final String[] header = line.split(":", 2); + secondPartHeaders.put(header[0], singletonList(header[1])); + } + } + final List secondPartTransferEncoding = secondPartHeaders.getOrDefault("Content-Transfer-Encoding", + secondPartHeaders.get("content-transfer-encoding")); + if ((secondPartTransferEncoding == null) + || (secondPartTransferEncoding.stream().noneMatch(x -> x.contains("binary")))) { + return newError(RestStatus.INTERNAL_SERVER_ERROR, + "Error parsing multipart request. Data part expected to have the \"binary\" content transfer encoding."); + } + final ByteArrayOutputStream baos = new ByteArrayOutputStream(); + int c; + while ((c = reader.read()) != -1) { + // one char to one byte, because of the ISO_8859_1 encoding + baos.write(c); + } + final byte[] temp = baos.toByteArray(); + final byte[] trailingEnding = ("\r\n--" + boundary + "--\r\n").getBytes(StandardCharsets.ISO_8859_1); + // check trailing + for (int i = trailingEnding.length - 1; i >= 0; i--) { + if (trailingEnding[i] != temp[(temp.length - trailingEnding.length) + i]) { + return newError(RestStatus.INTERNAL_SERVER_ERROR, "Error parsing multipart request."); + } + } + final Bucket bucket = buckets.get(bucketName); + if (bucket == null) { + return newError(RestStatus.NOT_FOUND, "bucket not found"); + } + final byte[] objectData = Arrays.copyOf(temp, temp.length - trailingEnding.length); + if ((objectName != null) && (bucketName != null) && (objectData != null)) { + bucket.objects.put(objectName, objectData); + return new Response(RestStatus.OK, emptyMap(), XContentType.JSON.mediaType(), metadata); + } else { + return newError(RestStatus.INTERNAL_SERVER_ERROR, "error parsing multipart request"); + } + } } else { - return newError(RestStatus.CONFLICT, "object already exist"); + return newError(RestStatus.INTERNAL_SERVER_ERROR, "upload type must be resumable or multipart"); } }); @@ -250,7 +349,7 @@ private static PathTrie defaultHandlers(final String endpoint, f // // https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload handlers.insert("PUT " + endpoint + "/upload/storage/v1/b/{bucket}/o", (params, headers, body) -> { - String objectId = params.get("upload_id"); + final String objectId = params.get("upload_id"); if (Strings.hasText(objectId) == false) { return newError(RestStatus.INTERNAL_SERVER_ERROR, "upload id is missing"); } @@ -268,38 +367,46 @@ private static PathTrie defaultHandlers(final String endpoint, f return newResponse(RestStatus.OK, emptyMap(), buildObjectResource(bucket.name, objectId, body)); }); - // Copy Object + // Rewrite or Copy Object // + // https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite // https://cloud.google.com/storage/docs/json_api/v1/objects/copy - handlers.insert("POST " + endpoint + "/storage/v1/b/{srcBucket}/o/{src}/copyTo/b/{destBucket}/o/{dest}", (params, headers, body)-> { - String source = params.get("src"); - if (Strings.hasText(source) == false) { - return newError(RestStatus.INTERNAL_SERVER_ERROR, "source object name is missing"); - } - - final Bucket srcBucket = buckets.get(params.get("srcBucket")); - if (srcBucket == null) { - return newError(RestStatus.NOT_FOUND, "source bucket not found"); - } - - String dest = params.get("dest"); - if (Strings.hasText(dest) == 
false) { - return newError(RestStatus.INTERNAL_SERVER_ERROR, "destination object name is missing"); - } - - final Bucket destBucket = buckets.get(params.get("destBucket")); - if (destBucket == null) { - return newError(RestStatus.NOT_FOUND, "destination bucket not found"); - } - - final byte[] sourceBytes = srcBucket.objects.get(source); - if (sourceBytes == null) { - return newError(RestStatus.NOT_FOUND, "source object not found"); - } - - destBucket.objects.put(dest, sourceBytes); - return newResponse(RestStatus.OK, emptyMap(), buildObjectResource(destBucket.name, dest, sourceBytes)); - }); + handlers.insert("POST " + endpoint + "/storage/v1/b/{srcBucket}/o/{src}/{action}/b/{destBucket}/o/{dest}", + (params, headers, body) -> { + final String action = params.get("action"); + if ((action.equals("rewriteTo") == false) && (action.equals("copyTo") == false)) { + return newError(RestStatus.INTERNAL_SERVER_ERROR, "Action not implemented. None of \"rewriteTo\" or \"copyTo\"."); + } + final String source = params.get("src"); + if (Strings.hasText(source) == false) { + return newError(RestStatus.INTERNAL_SERVER_ERROR, "source object name is missing"); + } + final Bucket srcBucket = buckets.get(params.get("srcBucket")); + if (srcBucket == null) { + return newError(RestStatus.NOT_FOUND, "source bucket not found"); + } + final String dest = params.get("dest"); + if (Strings.hasText(dest) == false) { + return newError(RestStatus.INTERNAL_SERVER_ERROR, "destination object name is missing"); + } + final Bucket destBucket = buckets.get(params.get("destBucket")); + if (destBucket == null) { + return newError(RestStatus.NOT_FOUND, "destination bucket not found"); + } + final byte[] sourceBytes = srcBucket.objects.get(source); + if (sourceBytes == null) { + return newError(RestStatus.NOT_FOUND, "source object not found"); + } + destBucket.objects.put(dest, sourceBytes); + if (action.equals("rewriteTo")) { + final XContentBuilder respBuilder = jsonBuilder(); + buildRewriteResponse(respBuilder, destBucket.name, dest, sourceBytes.length); + return newResponse(RestStatus.OK, emptyMap(), respBuilder); + } else { + assert action.equals("copyTo"); + return newResponse(RestStatus.OK, emptyMap(), buildObjectResource(destBucket.name, dest, sourceBytes)); + } + }); // List Objects // @@ -317,8 +424,8 @@ private static PathTrie defaultHandlers(final String endpoint, f builder.startArray("items"); final String prefixParam = params.get("prefix"); - for (Map.Entry object : bucket.objects.entrySet()) { - if (prefixParam != null && object.getKey().startsWith(prefixParam) == false) { + for (final Map.Entry object : bucket.objects.entrySet()) { + if ((prefixParam != null) && (object.getKey().startsWith(prefixParam) == false)) { continue; } buildObjectResource(builder, bucket.name, object.getKey(), object.getValue()); @@ -333,7 +440,7 @@ private static PathTrie defaultHandlers(final String endpoint, f // // https://cloud.google.com/storage/docs/request-body handlers.insert("GET " + endpoint + "/download/storage/v1/b/{bucket}/o/{object}", (params, headers, body) -> { - String object = params.get("object"); + final String object = params.get("object"); if (Strings.hasText(object) == false) { return newError(RestStatus.INTERNAL_SERVER_ERROR, "object id is missing"); } @@ -353,7 +460,7 @@ private static PathTrie defaultHandlers(final String endpoint, f // Batch // // https://cloud.google.com/storage/docs/json_api/v1/how-tos/batch - handlers.insert("POST " + endpoint + "/batch", (params, headers, body) -> { + handlers.insert("POST 
" + endpoint + "/batch/storage/v1", (params, headers, body) -> { final List batchedResponses = new ArrayList<>(); // A batch request body looks like this: @@ -385,7 +492,7 @@ private static PathTrie defaultHandlers(final String endpoint, f final List contentTypes = headers.getOrDefault("Content-Type", headers.get("Content-type")); if (contentTypes != null) { final String contentType = contentTypes.get(0); - if (contentType != null && contentType.contains("multipart/mixed; boundary=")) { + if ((contentType != null) && contentType.contains("multipart/mixed; boundary=")) { boundary = contentType.replace("multipart/mixed; boundary=", ""); } } @@ -398,25 +505,25 @@ private static PathTrie defaultHandlers(final String endpoint, f while ((line = reader.readLine()) != null) { // Start of a batched request if (line.equals("--" + boundary)) { - Map> batchedHeaders = new HashMap<>(); + final Map> batchedHeaders = new HashMap<>(); // Reads the headers, if any while ((line = reader.readLine()) != null) { - if (line.equals("\r\n") || line.length() == 0) { + if (line.equals("\r\n") || (line.length() == 0)) { // end of headers break; } else { - String[] header = line.split(":", 2); + final String[] header = line.split(":", 2); batchedHeaders.put(header[0], singletonList(header[1])); } } // Reads the method and URL line = reader.readLine(); - String batchedUrl = line.substring(0, line.lastIndexOf(' ')); + final String batchedUrl = line.substring(0, line.lastIndexOf(' ')); final Map batchedParams = new HashMap<>(); - int questionMark = batchedUrl.indexOf('?'); + final int questionMark = batchedUrl.indexOf('?'); if (questionMark != -1) { RestUtils.decodeQueryString(batchedUrl.substring(questionMark + 1), 0, batchedParams); } @@ -424,16 +531,16 @@ private static PathTrie defaultHandlers(final String endpoint, f // Reads the body line = reader.readLine(); byte[] batchedBody = new byte[0]; - if (line != null || line.startsWith("--" + boundary) == false) { + if ((line != null) || (line.startsWith("--" + boundary) == false)) { batchedBody = line.getBytes(StandardCharsets.UTF_8); } // Executes the batched request - RequestHandler handler = handlers.retrieve(batchedUrl, batchedParams); + final RequestHandler handler = handlers.retrieve(batchedUrl, batchedParams); if (handler != null) { try { batchedResponses.add(handler.execute(batchedParams, batchedHeaders, batchedBody)); - } catch (IOException e) { + } catch (final IOException e) { batchedResponses.add(newError(RestStatus.INTERNAL_SERVER_ERROR, e.getMessage())); } } @@ -442,11 +549,11 @@ private static PathTrie defaultHandlers(final String endpoint, f } // Now we can build the response - String sep = "--"; - String line = "\r\n"; + final String sep = "--"; + final String line = "\r\n"; - StringBuilder builder = new StringBuilder(); - for (Response response : batchedResponses) { + final StringBuilder builder = new StringBuilder(); + for (final Response response : batchedResponses) { builder.append(sep).append(boundary).append(line); builder.append("Content-Type: application/http").append(line); builder.append(line); @@ -465,7 +572,7 @@ private static PathTrie defaultHandlers(final String endpoint, f builder.append(line); builder.append(sep).append(boundary).append(sep); - byte[] content = builder.toString().getBytes(StandardCharsets.UTF_8); + final byte[] content = builder.toString().getBytes(StandardCharsets.UTF_8); return new Response(RestStatus.OK, emptyMap(), "multipart/mixed; boundary=" + boundary, content); }); @@ -525,7 +632,7 @@ private static Response 
newResponse(final RestStatus status, final Map { - try { - Bucket bucket = client.buckets().get(bucketName).execute(); - if (bucket != null) { - return Strings.hasText(bucket.getId()); - } - } catch (GoogleJsonResponseException e) { - GoogleJsonError error = e.getDetails(); - if ((e.getStatusCode() == HTTP_NOT_FOUND) || ((error != null) && (error.getCode() == HTTP_NOT_FOUND))) { - return false; - } - throw e; - } - return false; - }); - } catch (IOException e) { + final Bucket bucket = SocketAccess.doPrivilegedIOException(() -> storage.get(bucketName)); + return bucket != null; + } catch (final Exception e) { throw new BlobStoreException("Unable to check if bucket [" + bucketName + "] exists", e); } } /** - * List all blobs in the bucket + * List blobs in the bucket under the specified path. The path root is removed. * - * @param path base path of the blobs to list + * @param path + * base path of the blobs to list * @return a map of blob names and their metadata */ Map listBlobs(String path) throws IOException { - return SocketAccess.doPrivilegedIOException(() -> listBlobsByPath(bucket, path, path)); + return listBlobsByPrefix(path, ""); } /** * List all blobs in the bucket which have a prefix * - * @param path base path of the blobs to list - * @param prefix prefix of the blobs to list - * @return a map of blob names and their metadata + * @param path + * base path of the blobs to list. This path is removed from the + * names of the blobs returned. + * @param prefix + * prefix of the blobs to list. + * @return a map of blob names and their metadata. */ Map listBlobsByPrefix(String path, String prefix) throws IOException { - return SocketAccess.doPrivilegedIOException(() -> listBlobsByPath(bucket, buildKey(path, prefix), path)); - } - - /** - * Lists all blobs in a given bucket - * - * @param bucketName name of the bucket - * @param path base path of the blobs to list - * @param pathToRemove if true, this path part is removed from blob name - * @return a map of blob names and their metadata - */ - private Map listBlobsByPath(String bucketName, String path, String pathToRemove) throws IOException { - return blobsStream(client, bucketName, path, MAX_BATCHING_REQUESTS) - .map(new BlobMetaDataConverter(pathToRemove)) - .collect(Collectors.toMap(PlainBlobMetaData::name, Function.identity())); + final String pathPrefix = buildKey(path, prefix); + final MapBuilder mapBuilder = MapBuilder.newMapBuilder(); + SocketAccess.doPrivilegedVoidIOException(() -> { + storage.get(bucket).list(BlobListOption.prefix(pathPrefix)).iterateAll().forEach(blob -> { + assert blob.getName().startsWith(path); + final String suffixName = blob.getName().substring(path.length()); + mapBuilder.put(suffixName, new PlainBlobMetaData(suffixName, blob.getSize())); + }); + }); + return mapBuilder.immutableMap(); } /** @@ -161,19 +143,9 @@ private Map listBlobsByPath(String bucketName, String path * @return true if the blob exists, false otherwise */ boolean blobExists(String blobName) throws IOException { - try { - StorageObject blob = SocketAccess.doPrivilegedIOException(() -> client.objects().get(bucket, blobName).execute()); - if (blob != null) { - return Strings.hasText(blob.getId()); - } - } catch (GoogleJsonResponseException e) { - GoogleJsonError error = e.getDetails(); - if ((e.getStatusCode() == HTTP_NOT_FOUND) || ((error != null) && (error.getCode() == HTTP_NOT_FOUND))) { - return false; - } - throw e; - } - return false; + final BlobId blobId = BlobId.of(bucket, blobName); + final Blob blob = 
SocketAccess.doPrivilegedIOException(() -> storage.get(blobId)); + return blob != null; } /** @@ -183,18 +155,29 @@ boolean blobExists(String blobName) throws IOException { * @return an InputStream */ InputStream readBlob(String blobName) throws IOException { - try { - return SocketAccess.doPrivilegedIOException(() -> { - Storage.Objects.Get object = client.objects().get(bucket, blobName); - return object.executeMediaAsInputStream(); - }); - } catch (GoogleJsonResponseException e) { - GoogleJsonError error = e.getDetails(); - if ((e.getStatusCode() == HTTP_NOT_FOUND) || ((error != null) && (error.getCode() == HTTP_NOT_FOUND))) { - throw new NoSuchFileException(e.getMessage()); - } - throw e; + final BlobId blobId = BlobId.of(bucket, blobName); + final Blob blob = SocketAccess.doPrivilegedIOException(() -> storage.get(blobId)); + if (blob == null) { + throw new NoSuchFileException("Blob [" + blobName + "] does not exit"); } + final ReadChannel readChannel = SocketAccess.doPrivilegedIOException(blob::reader); + return Channels.newInputStream(new ReadableByteChannel() { + @SuppressForbidden(reason = "Channel is based of a socket not a file") + @Override + public int read(ByteBuffer dst) throws IOException { + return SocketAccess.doPrivilegedIOException(() -> readChannel.read(dst)); + } + + @Override + public boolean isOpen() { + return readChannel.isOpen(); + } + + @Override + public void close() throws IOException { + SocketAccess.doPrivilegedVoidIOException(readChannel::close); + } + }); } /** @@ -204,14 +187,58 @@ InputStream readBlob(String blobName) throws IOException { * @param blobSize expected size of the blob to be written */ void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException { - SocketAccess.doPrivilegedVoidIOException(() -> { - InputStreamContent stream = new InputStreamContent(null, inputStream); - stream.setLength(blobSize); + final BlobInfo blobInfo = BlobInfo.newBuilder(bucket, blobName).build(); + if (blobSize > LARGE_BLOB_THRESHOLD_BYTE_SIZE) { + writeBlobResumable(blobInfo, inputStream); + } else { + writeBlobMultipart(blobInfo, inputStream, blobSize); + } + } - Storage.Objects.Insert insert = client.objects().insert(bucket, null, stream); - insert.setName(blobName); - insert.execute(); - }); + /** + * Uploads a blob using the "resumable upload" method (multiple requests, which + * can be independently retried in case of failure, see + * https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload + * + * @param blobInfo the info for the blob to be uploaded + * @param inputStream the stream containing the blob data + */ + private void writeBlobResumable(BlobInfo blobInfo, InputStream inputStream) throws IOException { + final WriteChannel writeChannel = SocketAccess.doPrivilegedIOException(() -> storage.writer(blobInfo)); + Streams.copy(inputStream, Channels.newOutputStream(new WritableByteChannel() { + @Override + public boolean isOpen() { + return writeChannel.isOpen(); + } + + @Override + public void close() throws IOException { + SocketAccess.doPrivilegedVoidIOException(writeChannel::close); + } + + @SuppressForbidden(reason = "Channel is based of a socket not a file") + @Override + public int write(ByteBuffer src) throws IOException { + return SocketAccess.doPrivilegedIOException(() -> writeChannel.write(src)); + } + })); + } + + /** + * Uploads a blob using the "multipart upload" method (a single + * 'multipart/related' request containing both data and metadata. 
The request is + * gziped), see: + * https://cloud.google.com/storage/docs/json_api/v1/how-tos/multipart-upload + * + * @param blobInfo the info for the blob to be uploaded + * @param inputStream the stream containing the blob data + * @param blobSize the size + */ + private void writeBlobMultipart(BlobInfo blobInfo, InputStream inputStream, long blobSize) throws IOException { + assert blobSize <= LARGE_BLOB_THRESHOLD_BYTE_SIZE : "large blob uploads should use the resumable upload method"; + final ByteArrayOutputStream baos = new ByteArrayOutputStream(Math.toIntExact(blobSize)); + Streams.copy(inputStream, baos); + SocketAccess.doPrivilegedVoidIOException(() -> storage.create(blobInfo, baos.toByteArray())); } /** @@ -220,10 +247,11 @@ void writeBlob(String blobName, InputStream inputStream, long blobSize) throws I * @param blobName name of the blob */ void deleteBlob(String blobName) throws IOException { - if (!blobExists(blobName)) { + final BlobId blobId = BlobId.of(bucket, blobName); + final boolean deleted = SocketAccess.doPrivilegedIOException(() -> storage.delete(blobId)); + if (deleted == false) { throw new NoSuchFileException("Blob [" + blobName + "] does not exist"); } - SocketAccess.doPrivilegedIOException(() -> client.objects().delete(bucket, blobName).execute()); } /** @@ -232,7 +260,7 @@ void deleteBlob(String blobName) throws IOException { * @param prefix prefix of the buckets to delete */ void deleteBlobsByPrefix(String prefix) throws IOException { - deleteBlobs(listBlobsByPath(bucket, prefix, null).keySet()); + deleteBlobs(listBlobsByPrefix("", prefix).keySet()); } /** @@ -241,163 +269,55 @@ void deleteBlobsByPrefix(String prefix) throws IOException { * @param blobNames names of the bucket to delete */ void deleteBlobs(Collection blobNames) throws IOException { - if (blobNames == null || blobNames.isEmpty()) { + if (blobNames.isEmpty()) { return; } - + // for a single op submit a simple delete instead of a batch of size 1 if (blobNames.size() == 1) { deleteBlob(blobNames.iterator().next()); return; } - final List deletions = new ArrayList<>(Math.min(MAX_BATCHING_REQUESTS, blobNames.size())); - final Iterator blobs = blobNames.iterator(); - - SocketAccess.doPrivilegedVoidIOException(() -> { - while (blobs.hasNext()) { - // Create a delete request for each blob to delete - deletions.add(client.objects().delete(bucket, blobs.next())); - - if (blobs.hasNext() == false || deletions.size() == MAX_BATCHING_REQUESTS) { - try { - // Deletions are executed using a batch request - BatchRequest batch = client.batch(); - - // Used to track successful deletions - CountDown countDown = new CountDown(deletions.size()); - - for (Storage.Objects.Delete delete : deletions) { - // Queue the delete request in batch - delete.queue(batch, new JsonBatchCallback() { - @Override - public void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) throws IOException { - logger.error("failed to delete blob [{}] in bucket [{}]: {}", delete.getObject(), delete.getBucket(), e - .getMessage()); - } - - @Override - public void onSuccess(Void aVoid, HttpHeaders responseHeaders) throws IOException { - countDown.countDown(); - } - }); - } - - batch.execute(); - - if (countDown.isCountedDown() == false) { - throw new IOException("Failed to delete all [" + deletions.size() + "] blobs"); - } - } finally { - deletions.clear(); - } - } + final List blobIdsToDelete = blobNames.stream().map(blobName -> BlobId.of(bucket, blobName)).collect(Collectors.toList()); + final List deletedStatuses = 
SocketAccess.doPrivilegedIOException(() -> storage.delete(blobIdsToDelete)); + assert blobIdsToDelete.size() == deletedStatuses.size(); + boolean failed = false; + for (int i = 0; i < blobIdsToDelete.size(); i++) { + if (deletedStatuses.get(i) == false) { + logger.error("Failed to delete blob [{}] in bucket [{}]", blobIdsToDelete.get(i).getName(), bucket); + failed = true; } - }); + } + if (failed) { + throw new IOException("Failed to delete all [" + blobIdsToDelete.size() + "] blobs"); + } } /** * Moves a blob within the same bucket * * @param sourceBlob name of the blob to move - * @param targetBlob new name of the blob in the target bucket + * @param targetBlob new name of the blob in the same bucket */ - void moveBlob(String sourceBlob, String targetBlob) throws IOException { - SocketAccess.doPrivilegedIOException(() -> { + void moveBlob(String sourceBlobName, String targetBlobName) throws IOException { + final BlobId sourceBlobId = BlobId.of(bucket, sourceBlobName); + final BlobId targetBlobId = BlobId.of(bucket, targetBlobName); + final CopyRequest request = CopyRequest.newBuilder() + .setSource(sourceBlobId) + .setTarget(targetBlobId) + .build(); + SocketAccess.doPrivilegedVoidIOException(() -> { // There's no atomic "move" in GCS so we need to copy and delete - client.objects().copy(bucket, sourceBlob, bucket, targetBlob, null).execute(); - client.objects().delete(bucket, sourceBlob).execute(); - return null; + storage.copy(request).getResult(); + final boolean deleted = storage.delete(sourceBlobId); + if (deleted == false) { + throw new IOException("Failed to move source [" + sourceBlobName + "] to target [" + targetBlobName + "]"); + } }); } - private String buildKey(String keyPath, String s) { + private static String buildKey(String keyPath, String s) { assert s != null; return keyPath + s; } - /** - * Converts a {@link StorageObject} to a {@link PlainBlobMetaData} - */ - class BlobMetaDataConverter implements Function { - - private final String pathToRemove; - - BlobMetaDataConverter(String pathToRemove) { - this.pathToRemove = pathToRemove; - } - - @Override - public PlainBlobMetaData apply(StorageObject storageObject) { - String blobName = storageObject.getName(); - if (Strings.hasLength(pathToRemove)) { - blobName = blobName.substring(pathToRemove.length()); - } - return new PlainBlobMetaData(blobName, storageObject.getSize().longValue()); - } - } - - /** - * Spliterator can be used to list storage objects stored in a bucket. 
- */ - static class StorageObjectsSpliterator implements Spliterator { - - private final Storage.Objects.List list; - - StorageObjectsSpliterator(Storage client, String bucketName, String prefix, long pageSize) throws IOException { - list = SocketAccess.doPrivilegedIOException(() -> client.objects().list(bucketName)); - list.setMaxResults(pageSize); - if (prefix != null) { - list.setPrefix(prefix); - } - } - - @Override - public boolean tryAdvance(Consumer action) { - try { - // Retrieves the next page of items - Objects objects = SocketAccess.doPrivilegedIOException(list::execute); - - if ((objects == null) || (objects.getItems() == null) || (objects.getItems().isEmpty())) { - return false; - } - - // Consumes all the items - objects.getItems().forEach(action::accept); - - // Sets the page token of the next page, - // null indicates that all items have been consumed - String next = objects.getNextPageToken(); - if (next != null) { - list.setPageToken(next); - return true; - } - - return false; - } catch (Exception e) { - throw new BlobStoreException("Exception while listing objects", e); - } - } - - @Override - public Spliterator trySplit() { - return null; - } - - @Override - public long estimateSize() { - return Long.MAX_VALUE; - } - - @Override - public int characteristics() { - return 0; - } - } - - /** - * Returns a {@link Stream} of {@link StorageObject}s that are stored in a given bucket. - */ - static Stream blobsStream(Storage client, String bucketName, String prefix, long pageSize) throws IOException { - return StreamSupport.stream(new StorageObjectsSpliterator(client, bucketName, prefix, pageSize), false); - } - } diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageClientSettings.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageClientSettings.java index 03295c18c8ae6..99df38413326c 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageClientSettings.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageClientSettings.java @@ -18,8 +18,10 @@ */ package org.elasticsearch.repositories.gcs; -import com.google.api.client.googleapis.auth.oauth2.GoogleCredential; import com.google.api.services.storage.StorageScopes; +import com.google.auth.oauth2.ServiceAccountCredentials; + +import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureSetting; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -28,10 +30,12 @@ import java.io.IOException; import java.io.InputStream; import java.io.UncheckedIOException; +import java.net.URI; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; -import java.util.Locale; import java.util.Map; +import java.util.function.Function; import static org.elasticsearch.common.settings.Setting.timeSetting; @@ -44,11 +48,19 @@ public class GoogleCloudStorageClientSettings { /** A json Service Account file loaded from secure settings. */ static final Setting.AffixSetting CREDENTIALS_FILE_SETTING = Setting.affixKeySetting(PREFIX, "credentials_file", - key -> SecureSetting.secureFile(key, null)); + key -> SecureSetting.secureFile(key, null)); /** An override for the Storage endpoint to connect to. 
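 * <p>
 * For illustration only, a sketch of overriding the endpoint for a client named
 * {@code default} (the client name and URL below are hypothetical, not part of this change):
 * <pre>{@code
 *   Settings settings = Settings.builder()
 *       .put("gcs.client.default.endpoint", "http://localhost:9205")
 *       .build();
 *   String endpoint = ENDPOINT_SETTING.getConcreteSettingForNamespace("default").get(settings);
 * }</pre>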
*/ static final Setting.AffixSetting ENDPOINT_SETTING = Setting.affixKeySetting(PREFIX, "endpoint", - key -> new Setting<>(key, "", s -> s, Setting.Property.NodeScope)); + key -> Setting.simpleString(key, Setting.Property.NodeScope)); + + /** An override for the Google Project ID. */ + static final Setting.AffixSetting PROJECT_ID_SETTING = Setting.affixKeySetting(PREFIX, "project_id", + key -> Setting.simpleString(key, Setting.Property.NodeScope)); + + /** An override for the Token Server URI in the oauth flow. */ + static final Setting.AffixSetting TOKEN_URI_SETTING = Setting.affixKeySetting(PREFIX, "token_uri", + key -> new Setting<>(key, "", URI::create, Setting.Property.NodeScope)); /** * The timeout to establish a connection. A value of {@code -1} corresponds to an infinite timeout. A value of {@code 0} @@ -64,45 +76,59 @@ public class GoogleCloudStorageClientSettings { static final Setting.AffixSetting READ_TIMEOUT_SETTING = Setting.affixKeySetting(PREFIX, "read_timeout", key -> timeSetting(key, TimeValue.ZERO, TimeValue.MINUS_ONE, Setting.Property.NodeScope)); - /** Name used by the client when it uses the Google Cloud JSON API. **/ + /** Name used by the client when it uses the Google Cloud JSON API. */ static final Setting.AffixSetting APPLICATION_NAME_SETTING = Setting.affixKeySetting(PREFIX, "application_name", - key -> new Setting<>(key, "repository-gcs", s -> s, Setting.Property.NodeScope)); + key -> new Setting<>(key, "repository-gcs", Function.identity(), Setting.Property.NodeScope, Setting.Property.Deprecated)); - /** The credentials used by the client to connect to the Storage endpoint **/ - private final GoogleCredential credential; + /** The credentials used by the client to connect to the Storage endpoint. */ + private final ServiceAccountCredentials credential; - /** The Storage root URL the client should talk to, or empty string to use the default. **/ + /** The Storage endpoint URL the client should talk to. Null value sets the default. */ private final String endpoint; - /** The timeout to establish a connection **/ + /** The Google project ID overriding the default way to infer it. Null value sets the default. */ + private final String projectId; + + /** The timeout to establish a connection */ private final TimeValue connectTimeout; - /** The timeout to read data from an established connection **/ + /** The timeout to read data from an established connection */ private final TimeValue readTimeout; - /** The Storage client application name **/ + /** The Storage client application name */ private final String applicationName; - GoogleCloudStorageClientSettings(final GoogleCredential credential, + /** The token server URI. This leases access tokens in the oauth flow. */ + private final URI tokenUri; + + GoogleCloudStorageClientSettings(final ServiceAccountCredentials credential, final String endpoint, + final String projectId, final TimeValue connectTimeout, final TimeValue readTimeout, - final String applicationName) { + final String applicationName, + final URI tokenUri) { this.credential = credential; this.endpoint = endpoint; + this.projectId = projectId; this.connectTimeout = connectTimeout; this.readTimeout = readTimeout; this.applicationName = applicationName; + this.tokenUri = tokenUri; } - public GoogleCredential getCredential() { + public ServiceAccountCredentials getCredential() { return credential; } - public String getEndpoint() { + public String getHost() { return endpoint; } + public String getProjectId() { + return Strings.hasLength(projectId) ? 
projectId : (credential != null ? credential.getProjectId() : null); + } + public TimeValue getConnectTimeout() { return connectTimeout; } @@ -115,9 +141,13 @@ public String getApplicationName() { return applicationName; } + public URI getTokenUri() { + return tokenUri; + } + public static Map load(final Settings settings) { final Map clients = new HashMap<>(); - for (String clientName: settings.getGroups(PREFIX).keySet()) { + for (final String clientName: settings.getGroups(PREFIX).keySet()) { clients.put(clientName, getClientSettings(settings, clientName)); } if (clients.containsKey("default") == false) { @@ -132,22 +162,27 @@ static GoogleCloudStorageClientSettings getClientSettings(final Settings setting return new GoogleCloudStorageClientSettings( loadCredential(settings, clientName), getConfigValue(settings, clientName, ENDPOINT_SETTING), + getConfigValue(settings, clientName, PROJECT_ID_SETTING), getConfigValue(settings, clientName, CONNECT_TIMEOUT_SETTING), getConfigValue(settings, clientName, READ_TIMEOUT_SETTING), - getConfigValue(settings, clientName, APPLICATION_NAME_SETTING) + getConfigValue(settings, clientName, APPLICATION_NAME_SETTING), + getConfigValue(settings, clientName, TOKEN_URI_SETTING) ); } /** - * Loads the service account file corresponding to a given client name. If no file is defined for the client, - * a {@code null} credential is returned. + * Loads the service account file corresponding to a given client name. If no + * file is defined for the client, a {@code null} credential is returned. * - * @param settings the {@link Settings} - * @param clientName the client name + * @param settings + * the {@link Settings} + * @param clientName + * the client name * - * @return the {@link GoogleCredential} to use for the given client, {@code null} if no service account is defined. + * @return the {@link ServiceAccountCredentials} to use for the given client, + * {@code null} if no service account is defined. 
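+ * <p>
+ * A minimal usage sketch, assuming a service account JSON key has been added to the
+ * keystore for a hypothetical client named {@code default} ({@code serviceAccountFileBytes}
+ * is a placeholder for the key file contents):
+ * <pre>{@code
+ *   MockSecureSettings secureSettings = new MockSecureSettings();
+ *   secureSettings.setFile("gcs.client.default.credentials_file", serviceAccountFileBytes);
+ *   Settings settings = Settings.builder().setSecureSettings(secureSettings).build();
+ *   ServiceAccountCredentials credentials = loadCredential(settings, "default");
+ * }</pre>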
*/ - static GoogleCredential loadCredential(final Settings settings, final String clientName) { + static ServiceAccountCredentials loadCredential(final Settings settings, final String clientName) { try { if (CREDENTIALS_FILE_SETTING.getConcreteSettingForNamespace(clientName).exists(settings) == false) { // explicitly returning null here so that the default credential @@ -155,19 +190,22 @@ static GoogleCredential loadCredential(final Settings settings, final String cli return null; } try (InputStream credStream = CREDENTIALS_FILE_SETTING.getConcreteSettingForNamespace(clientName).get(settings)) { - GoogleCredential credential = GoogleCredential.fromStream(credStream); - if (credential.createScopedRequired()) { - credential = credential.createScoped(Collections.singleton(StorageScopes.DEVSTORAGE_FULL_CONTROL)); - } - return credential; + final Collection scopes = Collections.singleton(StorageScopes.DEVSTORAGE_FULL_CONTROL); + return SocketAccess.doPrivilegedIOException(() -> { + final ServiceAccountCredentials credentials = ServiceAccountCredentials.fromStream(credStream); + if (credentials.createScopedRequired()) { + return (ServiceAccountCredentials) credentials.createScoped(scopes); + } + return credentials; + }); } - } catch (IOException e) { + } catch (final IOException e) { throw new UncheckedIOException(e); } } private static T getConfigValue(final Settings settings, final String clientName, final Setting.AffixSetting clientSetting) { - Setting concreteSetting = clientSetting.getConcreteSettingForNamespace(clientName); + final Setting concreteSetting = clientSetting.getConcreteSettingForNamespace(clientName); return concreteSetting.get(settings); } } diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java index ef24cd959e55b..1d2d70584adf9 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java @@ -19,21 +19,6 @@ package org.elasticsearch.repositories.gcs; -import com.google.api.client.auth.oauth2.TokenRequest; -import com.google.api.client.auth.oauth2.TokenResponse; -import com.google.api.client.googleapis.json.GoogleJsonError; -import com.google.api.client.http.GenericUrl; -import com.google.api.client.http.HttpHeaders; -import com.google.api.client.json.GenericJson; -import com.google.api.client.json.webtoken.JsonWebSignature; -import com.google.api.client.json.webtoken.JsonWebToken; -import com.google.api.client.util.ClassInfo; -import com.google.api.client.util.Data; -import com.google.api.services.storage.Storage; -import com.google.api.services.storage.model.Bucket; -import com.google.api.services.storage.model.Objects; -import com.google.api.services.storage.model.StorageObject; -import org.elasticsearch.SpecialPermission; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -42,8 +27,6 @@ import org.elasticsearch.plugins.RepositoryPlugin; import org.elasticsearch.repositories.Repository; -import java.security.AccessController; -import java.security.PrivilegedAction; import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -51,63 +34,6 @@ public class GoogleCloudStoragePlugin extends Plugin implements 
RepositoryPlugin { - static { - /* - * Google HTTP client changes access levels because its silly and we - * can't allow that on any old stack stack so we pull it here, up front, - * so we can cleanly check the permissions for it. Without this changing - * the permission can fail if any part of core is on the stack because - * our plugin permissions don't allow core to "reach through" plugins to - * change the permission. Because that'd be silly. - */ - SpecialPermission.check(); - AccessController.doPrivileged((PrivilegedAction) () -> { - // ClassInfo put in cache all the fields of a given class - // that are annoted with @Key; at the same time it changes - // the field access level using setAccessible(). Calling - // them here put the ClassInfo in cache (they are never evicted) - // before the SecurityManager is installed. - ClassInfo.of(HttpHeaders.class, true); - - ClassInfo.of(JsonWebSignature.Header.class, false); - ClassInfo.of(JsonWebToken.Payload.class, false); - - ClassInfo.of(TokenRequest.class, false); - ClassInfo.of(TokenResponse.class, false); - - ClassInfo.of(GenericJson.class, false); - ClassInfo.of(GenericUrl.class, false); - - Data.nullOf(GoogleJsonError.ErrorInfo.class); - ClassInfo.of(GoogleJsonError.class, false); - - Data.nullOf(Bucket.Cors.class); - ClassInfo.of(Bucket.class, false); - ClassInfo.of(Bucket.Cors.class, false); - ClassInfo.of(Bucket.Lifecycle.class, false); - ClassInfo.of(Bucket.Logging.class, false); - ClassInfo.of(Bucket.Owner.class, false); - ClassInfo.of(Bucket.Versioning.class, false); - ClassInfo.of(Bucket.Website.class, false); - - ClassInfo.of(StorageObject.class, false); - ClassInfo.of(StorageObject.Owner.class, false); - - ClassInfo.of(Objects.class, false); - - ClassInfo.of(Storage.Buckets.Get.class, false); - ClassInfo.of(Storage.Buckets.Insert.class, false); - - ClassInfo.of(Storage.Objects.Get.class, false); - ClassInfo.of(Storage.Objects.Insert.class, false); - ClassInfo.of(Storage.Objects.Delete.class, false); - ClassInfo.of(Storage.Objects.Copy.class, false); - ClassInfo.of(Storage.Objects.List.class, false); - - return null; - }); - } - private final Map clientsSettings; public GoogleCloudStoragePlugin(final Settings settings) { @@ -134,8 +60,10 @@ public List> getSettings() { return Arrays.asList( GoogleCloudStorageClientSettings.CREDENTIALS_FILE_SETTING, GoogleCloudStorageClientSettings.ENDPOINT_SETTING, + GoogleCloudStorageClientSettings.PROJECT_ID_SETTING, GoogleCloudStorageClientSettings.CONNECT_TIMEOUT_SETTING, GoogleCloudStorageClientSettings.READ_TIMEOUT_SETTING, - GoogleCloudStorageClientSettings.APPLICATION_NAME_SETTING); + GoogleCloudStorageClientSettings.APPLICATION_NAME_SETTING, + GoogleCloudStorageClientSettings.TOKEN_URI_SETTING); } } diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java index e193b8238b8d2..976befae0a269 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java @@ -19,7 +19,6 @@ package org.elasticsearch.repositories.gcs; -import com.google.api.services.storage.Storage; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobPath; @@ -27,7 +26,6 @@ import 
org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.repositories.RepositoryException; @@ -39,7 +37,8 @@ import static org.elasticsearch.common.settings.Setting.boolSetting; import static org.elasticsearch.common.settings.Setting.byteSizeSetting; import static org.elasticsearch.common.settings.Setting.simpleString; -import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; + +import com.google.cloud.storage.Storage; class GoogleCloudStorageRepository extends BlobStoreRepository { diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java index bccc5e0ffdc5c..57bcc4b131356 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java @@ -19,23 +19,26 @@ package org.elasticsearch.repositories.gcs; -import com.google.api.client.googleapis.auth.oauth2.GoogleCredential; -import com.google.api.client.googleapis.javanet.GoogleNetHttpTransport; -import com.google.api.client.http.HttpBackOffIOExceptionHandler; -import com.google.api.client.http.HttpBackOffUnsuccessfulResponseHandler; -import com.google.api.client.http.HttpRequest; -import com.google.api.client.http.HttpRequestInitializer; +import com.google.api.client.googleapis.GoogleUtils; import com.google.api.client.http.HttpTransport; -import com.google.api.client.http.HttpUnsuccessfulResponseHandler; -import com.google.api.client.json.jackson2.JacksonFactory; -import com.google.api.client.util.ExponentialBackOff; -import com.google.api.services.storage.Storage; +import com.google.api.client.http.javanet.DefaultConnectionFactory; +import com.google.api.client.http.javanet.NetHttpTransport; +import com.google.auth.oauth2.ServiceAccountCredentials; +import com.google.cloud.http.HttpTransportOptions; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.env.Environment; import java.io.IOException; +import java.net.HttpURLConnection; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; import java.util.Map; public class GoogleCloudStorageService extends AbstractComponent { @@ -51,42 +54,107 @@ public GoogleCloudStorageService(final Environment environment, final Map httpTransport) + .build(); + final StorageOptions.Builder storageOptionsBuilder = StorageOptions.newBuilder() + .setTransportOptions(httpTransportOptions) + .setHeaderProvider(() -> { + final MapBuilder mapBuilder = MapBuilder.newMapBuilder(); + if (Strings.hasLength(clientSettings.getApplicationName())) { + mapBuilder.put("user-agent", clientSettings.getApplicationName()); + } + return mapBuilder.immutableMap(); + }); + if (Strings.hasLength(clientSettings.getHost())) { + storageOptionsBuilder.setHost(clientSettings.getHost()); } - if (Strings.hasLength(clientSettings.getEndpoint())) { - 
storage.setRootUrl(clientSettings.getEndpoint()); + if (Strings.hasLength(clientSettings.getProjectId())) { + storageOptionsBuilder.setProjectId(clientSettings.getProjectId()); } - return storage.build(); + if (clientSettings.getCredential() == null) { + logger.warn("\"Application Default Credentials\" are not supported out of the box." + + " Additional file system permissions have to be granted to the plugin."); + } else { + ServiceAccountCredentials serviceAccountCredentials = clientSettings.getCredential(); + // override token server URI + final URI tokenServerUri = clientSettings.getTokenUri(); + if (Strings.hasLength(tokenServerUri.toString())) { + // Rebuild the service account credentials in order to use a custom Token url. + // This is mostly used for testing purpose. + serviceAccountCredentials = serviceAccountCredentials.toBuilder().setTokenServerUri(tokenServerUri).build(); + } + storageOptionsBuilder.setCredentials(serviceAccountCredentials); + } + return storageOptionsBuilder.build().getService(); } - static HttpRequestInitializer createRequestInitializer(final GoogleCloudStorageClientSettings settings) throws IOException { - GoogleCredential credential = settings.getCredential(); - if (credential == null) { - credential = GoogleCredential.getApplicationDefault(); + /** + * Pins the TLS trust certificates and, more importantly, overrides connection + * URLs in the case of a custom endpoint setting because some connections don't + * fully honor this setting (bugs in the SDK). The default connection factory + * opens a new connection for each request. This is required for the storage + * instance to be thread-safe. + **/ + private static HttpTransport createHttpTransport(final String endpoint) throws Exception { + final NetHttpTransport.Builder builder = new NetHttpTransport.Builder(); + // requires java.lang.RuntimePermission "setFactory" + builder.trustCertificates(GoogleUtils.getCertificateTrustStore()); + if (Strings.hasLength(endpoint)) { + final URL endpointUrl = URI.create(endpoint).toURL(); + builder.setConnectionFactory(new DefaultConnectionFactory() { + @Override + public HttpURLConnection openConnection(final URL originalUrl) throws IOException { + // test if the URL is built correctly, ie following the `host` setting + if (originalUrl.getHost().equals(endpointUrl.getHost()) && originalUrl.getPort() == endpointUrl.getPort() + && originalUrl.getProtocol().equals(endpointUrl.getProtocol())) { + return super.openConnection(originalUrl); + } + // override connection URLs because some don't follow the config. See + // https://github.com/GoogleCloudPlatform/google-cloud-java/issues/3254 and + // https://github.com/GoogleCloudPlatform/google-cloud-java/issues/3255 + URI originalUri; + try { + originalUri = originalUrl.toURI(); + } catch (final URISyntaxException e) { + throw new RuntimeException(e); + } + String overridePath = "/"; + if (originalUri.getRawPath() != null) { + overridePath = originalUri.getRawPath(); + } + if (originalUri.getRawQuery() != null) { + overridePath += "?" 
+ originalUri.getRawQuery(); + } + return super.openConnection( + new URL(endpointUrl.getProtocol(), endpointUrl.getHost(), endpointUrl.getPort(), overridePath)); + } + }); } - return new DefaultHttpRequestInitializer(credential, toTimeout(settings.getConnectTimeout()), toTimeout(settings.getReadTimeout())); + return builder.build(); } - /** Converts timeout values from the settings to a timeout value for the Google Cloud SDK **/ + /** + * Converts timeout values from the settings to a timeout value for the Google + * Cloud SDK + **/ static Integer toTimeout(final TimeValue timeout) { // Null or zero in settings means the default timeout if (timeout == null || TimeValue.ZERO.equals(timeout)) { - return null; + // negative value means using the default value + return -1; } // -1 means infinite timeout if (TimeValue.MINUS_ONE.equals(timeout)) { @@ -96,51 +164,4 @@ static Integer toTimeout(final TimeValue timeout) { return Math.toIntExact(timeout.getMillis()); } - /** - * HTTP request initializer that set timeouts and backoff handler while deferring authentication to GoogleCredential. - * See https://cloud.google.com/storage/transfer/create-client#retry - */ - static class DefaultHttpRequestInitializer implements HttpRequestInitializer { - - private final Integer connectTimeout; - private final Integer readTimeout; - private final GoogleCredential credential; - - DefaultHttpRequestInitializer(GoogleCredential credential, Integer connectTimeoutMillis, Integer readTimeoutMillis) { - this.credential = credential; - this.connectTimeout = connectTimeoutMillis; - this.readTimeout = readTimeoutMillis; - } - - @Override - public void initialize(HttpRequest request) { - if (connectTimeout != null) { - request.setConnectTimeout(connectTimeout); - } - if (readTimeout != null) { - request.setReadTimeout(readTimeout); - } - - request.setIOExceptionHandler(new HttpBackOffIOExceptionHandler(newBackOff())); - request.setInterceptor(credential); - - final HttpUnsuccessfulResponseHandler handler = new HttpBackOffUnsuccessfulResponseHandler(newBackOff()); - request.setUnsuccessfulResponseHandler((req, resp, supportsRetry) -> { - // Let the credential handle the response. 
If it failed, we rely on our backoff handler - return credential.handleResponse(req, resp, supportsRetry) || handler.handleResponse(req, resp, supportsRetry); - } - ); - } - - private ExponentialBackOff newBackOff() { - return new ExponentialBackOff.Builder() - .setInitialIntervalMillis(100) - .setMaxIntervalMillis(6000) - .setMaxElapsedTimeMillis(900000) - .setMultiplier(1.5) - .setRandomizationFactor(0.5) - .build(); - } - } - } diff --git a/plugins/repository-gcs/src/main/plugin-metadata/plugin-security.policy b/plugins/repository-gcs/src/main/plugin-metadata/plugin-security.policy index ce9b0334638a0..fffe6cbbc0f24 100644 --- a/plugins/repository-gcs/src/main/plugin-metadata/plugin-security.policy +++ b/plugins/repository-gcs/src/main/plugin-metadata/plugin-security.policy @@ -18,11 +18,12 @@ */ grant { + // required by: com.google.api.client.json.JsonParser#parseValue permission java.lang.RuntimePermission "accessDeclaredMembers"; - permission java.lang.RuntimePermission "setFactory"; + // required by: com.google.api.client.json.GenericJson# permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; - permission java.net.URLPermission "http://www.googleapis.com/*", "*"; - permission java.net.URLPermission "https://www.googleapis.com/*", "*"; + // required to add google certs to the gcs client trustore + permission java.lang.RuntimePermission "setFactory"; // gcs client opens socket connections for to access repository permission java.net.SocketPermission "*", "connect"; diff --git a/plugins/repository-gcs/src/test/java/com/google/cloud/storage/StorageRpcOptionUtils.java b/plugins/repository-gcs/src/test/java/com/google/cloud/storage/StorageRpcOptionUtils.java new file mode 100644 index 0000000000000..f2b8a0571ad87 --- /dev/null +++ b/plugins/repository-gcs/src/test/java/com/google/cloud/storage/StorageRpcOptionUtils.java @@ -0,0 +1,54 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package com.google.cloud.storage; + +import com.google.cloud.storage.spi.v1.StorageRpc; + +import static org.mockito.Mockito.mock; + +/** + * Utility class that exposed Google SDK package protected methods to + * create specific StorageRpc objects in unit tests. + */ +public class StorageRpcOptionUtils { + + private StorageRpcOptionUtils(){} + + public static String getPrefix(final Storage.BlobListOption... 
options) { + if (options != null) { + for (final Option option : options) { + final StorageRpc.Option rpcOption = option.getRpcOption(); + if (StorageRpc.Option.PREFIX.equals(rpcOption)) { + return (String) option.getValue(); + } + } + } + return null; + } + + public static CopyWriter createCopyWriter(final Blob result) { + return new CopyWriter(mock(StorageOptions.class), mock(StorageRpc.RewriteResponse.class)) { + @Override + public Blob getResult() { + return result; + } + }; + } +} diff --git a/plugins/repository-gcs/src/test/java/com/google/cloud/storage/StorageTestUtils.java b/plugins/repository-gcs/src/test/java/com/google/cloud/storage/StorageTestUtils.java new file mode 100644 index 0000000000000..68175d7f1be53 --- /dev/null +++ b/plugins/repository-gcs/src/test/java/com/google/cloud/storage/StorageTestUtils.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package com.google.cloud.storage; + +/** + * Utility class that exposes Google SDK package protected methods to + * create bucket and blob objects in unit tests.
+ */ +public class StorageTestUtils { + + private StorageTestUtils(){} + + public static Bucket createBucket(final Storage storage, final String bucketName) { + return new Bucket(storage, (BucketInfo.BuilderImpl) BucketInfo.newBuilder(bucketName)); + } + + public static Blob createBlob(final Storage storage, final String bucketName, final String blobName, final long blobSize) { + return new Blob(storage, (BlobInfo.BuilderImpl) BlobInfo.newBuilder(bucketName, blobName).setSize(blobSize)); + } +} diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index 19551f3b082fa..c4d9b67899672 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -19,7 +19,7 @@ package org.elasticsearch.repositories.gcs; -import com.google.api.services.storage.Storage; +import com.google.cloud.storage.Storage; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageClientSettingsTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageClientSettingsTests.java index badd86cd8a2b3..14cb4fa242e7d 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageClientSettingsTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageClientSettingsTests.java @@ -18,20 +18,25 @@ */ package org.elasticsearch.repositories.gcs; -import com.google.api.client.googleapis.auth.oauth2.GoogleCredential; import com.google.api.services.storage.StorageScopes; +import com.google.auth.oauth2.ServiceAccountCredentials; + import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; +import java.net.URI; import java.nio.charset.StandardCharsets; import java.security.KeyPair; import java.security.KeyPairGenerator; +import java.util.ArrayList; import java.util.Base64; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Locale; import java.util.Map; @@ -39,6 +44,7 @@ import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.CONNECT_TIMEOUT_SETTING; import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.CREDENTIALS_FILE_SETTING; import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.ENDPOINT_SETTING; +import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.PROJECT_ID_SETTING; import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.READ_TIMEOUT_SETTING; import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.getClientSettings; import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.loadCredential; @@ -46,59 +52,78 @@ public class 
GoogleCloudStorageClientSettingsTests extends ESTestCase { public void testLoadWithEmptySettings() { - Map clientsSettings = GoogleCloudStorageClientSettings.load(Settings.EMPTY); + final Map clientsSettings = GoogleCloudStorageClientSettings.load(Settings.EMPTY); assertEquals(1, clientsSettings.size()); assertNotNull(clientsSettings.get("default")); } public void testLoad() throws Exception { final int nbClients = randomIntBetween(1, 5); - final Tuple, Settings> randomClients = randomClients(nbClients); + final List> deprecationWarnings = new ArrayList<>(); + final Tuple, Settings> randomClients = randomClients(nbClients, deprecationWarnings); final Map expectedClientsSettings = randomClients.v1(); - Map actualClientsSettings = GoogleCloudStorageClientSettings.load(randomClients.v2()); + final Map actualClientsSettings = GoogleCloudStorageClientSettings + .load(randomClients.v2()); assertEquals(expectedClientsSettings.size(), actualClientsSettings.size()); - for (String clientName : expectedClientsSettings.keySet()) { - GoogleCloudStorageClientSettings actualClientSettings = actualClientsSettings.get(clientName); + for (final String clientName : expectedClientsSettings.keySet()) { + final GoogleCloudStorageClientSettings actualClientSettings = actualClientsSettings.get(clientName); assertNotNull(actualClientSettings); - GoogleCloudStorageClientSettings expectedClientSettings = expectedClientsSettings.get(clientName); + final GoogleCloudStorageClientSettings expectedClientSettings = expectedClientsSettings.get(clientName); assertNotNull(expectedClientSettings); - assertGoogleCredential(expectedClientSettings.getCredential(), actualClientSettings.getCredential()); - assertEquals(expectedClientSettings.getEndpoint(), actualClientSettings.getEndpoint()); + assertEquals(expectedClientSettings.getHost(), actualClientSettings.getHost()); + assertEquals(expectedClientSettings.getProjectId(), actualClientSettings.getProjectId()); assertEquals(expectedClientSettings.getConnectTimeout(), actualClientSettings.getConnectTimeout()); assertEquals(expectedClientSettings.getReadTimeout(), actualClientSettings.getReadTimeout()); assertEquals(expectedClientSettings.getApplicationName(), actualClientSettings.getApplicationName()); } + + if (deprecationWarnings.isEmpty() == false) { + assertSettingDeprecationsAndWarnings(deprecationWarnings.toArray(new Setting[0])); + } } public void testLoadCredential() throws Exception { - Tuple, Settings> randomClient = randomClients(1); - GoogleCloudStorageClientSettings expectedClientSettings = randomClient.v1().values().iterator().next(); - String clientName = randomClient.v1().keySet().iterator().next(); - + final List> deprecationWarnings = new ArrayList<>(); + final Tuple, Settings> randomClient = randomClients(1, deprecationWarnings); + final GoogleCloudStorageClientSettings expectedClientSettings = randomClient.v1().values().iterator().next(); + final String clientName = randomClient.v1().keySet().iterator().next(); assertGoogleCredential(expectedClientSettings.getCredential(), loadCredential(randomClient.v2(), clientName)); } + public void testProjectIdDefaultsToCredentials() throws Exception { + final String clientName = randomAlphaOfLength(5); + final Tuple credentials = randomCredential(clientName); + final ServiceAccountCredentials credential = credentials.v1(); + final GoogleCloudStorageClientSettings googleCloudStorageClientSettings = new GoogleCloudStorageClientSettings(credential, + ENDPOINT_SETTING.getDefault(Settings.EMPTY), 
PROJECT_ID_SETTING.getDefault(Settings.EMPTY), + CONNECT_TIMEOUT_SETTING.getDefault(Settings.EMPTY), READ_TIMEOUT_SETTING.getDefault(Settings.EMPTY), + APPLICATION_NAME_SETTING.getDefault(Settings.EMPTY), new URI("")); + assertEquals(credential.getProjectId(), googleCloudStorageClientSettings.getProjectId()); + } + /** Generates a given number of GoogleCloudStorageClientSettings along with the Settings to build them from **/ - private Tuple, Settings> randomClients(final int nbClients) throws Exception { + private Tuple, Settings> randomClients(final int nbClients, + final List> deprecationWarnings) + throws Exception { final Map expectedClients = new HashMap<>(); - expectedClients.put("default", getClientSettings(Settings.EMPTY, "default")); final Settings.Builder settings = Settings.builder(); final MockSecureSettings secureSettings = new MockSecureSettings(); for (int i = 0; i < nbClients; i++) { - String clientName = randomAlphaOfLength(5).toLowerCase(Locale.ROOT); - - GoogleCloudStorageClientSettings clientSettings = randomClient(clientName, settings, secureSettings); + final String clientName = randomAlphaOfLength(5).toLowerCase(Locale.ROOT); + final GoogleCloudStorageClientSettings clientSettings = randomClient(clientName, settings, secureSettings, deprecationWarnings); expectedClients.put(clientName, clientSettings); } if (randomBoolean()) { - GoogleCloudStorageClientSettings clientSettings = randomClient("default", settings, secureSettings); + final GoogleCloudStorageClientSettings clientSettings = randomClient("default", settings, secureSettings, deprecationWarnings); expectedClients.put("default", clientSettings); + } else { + expectedClients.put("default", getClientSettings(Settings.EMPTY, "default")); } return Tuple.tuple(expectedClients, settings.setSecureSettings(secureSettings).build()); @@ -107,20 +132,30 @@ private Tuple, Settings> randomCli /** Generates a random GoogleCloudStorageClientSettings along with the Settings to build it **/ private static GoogleCloudStorageClientSettings randomClient(final String clientName, final Settings.Builder settings, - final MockSecureSettings secureSettings) throws Exception { + final MockSecureSettings secureSettings, + final List> deprecationWarnings) throws Exception { - Tuple credentials = randomCredential(clientName); - GoogleCredential credential = credentials.v1(); + final Tuple credentials = randomCredential(clientName); + final ServiceAccountCredentials credential = credentials.v1(); secureSettings.setFile(CREDENTIALS_FILE_SETTING.getConcreteSettingForNamespace(clientName).getKey(), credentials.v2()); String endpoint; if (randomBoolean()) { - endpoint = randomAlphaOfLength(5); + endpoint = randomFrom("http://www.elastic.co", "http://metadata.google.com:88/oauth", "https://www.googleapis.com", + "https://www.elastic.co:443", "http://localhost:8443", "https://www.googleapis.com/oauth/token"); settings.put(ENDPOINT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), endpoint); } else { endpoint = ENDPOINT_SETTING.getDefault(Settings.EMPTY); } + String projectId; + if (randomBoolean()) { + projectId = randomAlphaOfLength(5); + settings.put(PROJECT_ID_SETTING.getConcreteSettingForNamespace(clientName).getKey(), projectId); + } else { + projectId = PROJECT_ID_SETTING.getDefault(Settings.EMPTY); + } + TimeValue connectTimeout; if (randomBoolean()) { connectTimeout = randomTimeout(); @@ -141,40 +176,35 @@ private static GoogleCloudStorageClientSettings randomClient(final String client if (randomBoolean()) { applicationName = 
randomAlphaOfLength(5); settings.put(APPLICATION_NAME_SETTING.getConcreteSettingForNamespace(clientName).getKey(), applicationName); + deprecationWarnings.add(APPLICATION_NAME_SETTING.getConcreteSettingForNamespace(clientName)); } else { applicationName = APPLICATION_NAME_SETTING.getDefault(Settings.EMPTY); } - return new GoogleCloudStorageClientSettings(credential, endpoint, connectTimeout, readTimeout, applicationName); + return new GoogleCloudStorageClientSettings(credential, endpoint, projectId, connectTimeout, readTimeout, applicationName, + new URI("")); } /** Generates a random GoogleCredential along with its corresponding Service Account file provided as a byte array **/ - private static Tuple randomCredential(final String clientName) throws Exception { - KeyPair keyPair = KeyPairGenerator.getInstance("RSA").generateKeyPair(); - - GoogleCredential.Builder credentialBuilder = new GoogleCredential.Builder(); - credentialBuilder.setServiceAccountId(clientName); - credentialBuilder.setServiceAccountProjectId("project_id_" + clientName); - credentialBuilder.setServiceAccountScopes(Collections.singleton(StorageScopes.DEVSTORAGE_FULL_CONTROL)); - credentialBuilder.setServiceAccountPrivateKey(keyPair.getPrivate()); - credentialBuilder.setServiceAccountPrivateKeyId("private_key_id_" + clientName); - - String encodedPrivateKey = Base64.getEncoder().encodeToString(keyPair.getPrivate().getEncoded()); - String serviceAccount = "{\"type\":\"service_account\"," + + private static Tuple randomCredential(final String clientName) throws Exception { + final KeyPair keyPair = KeyPairGenerator.getInstance("RSA").generateKeyPair(); + final ServiceAccountCredentials.Builder credentialBuilder = ServiceAccountCredentials.newBuilder(); + credentialBuilder.setClientId("id_" + clientName); + credentialBuilder.setClientEmail(clientName); + credentialBuilder.setProjectId("project_id_" + clientName); + credentialBuilder.setPrivateKey(keyPair.getPrivate()); + credentialBuilder.setPrivateKeyId("private_key_id_" + clientName); + credentialBuilder.setScopes(Collections.singleton(StorageScopes.DEVSTORAGE_FULL_CONTROL)); + final String encodedPrivateKey = Base64.getEncoder().encodeToString(keyPair.getPrivate().getEncoded()); + final String serviceAccount = "{\"type\":\"service_account\"," + "\"project_id\":\"project_id_" + clientName + "\"," + "\"private_key_id\":\"private_key_id_" + clientName + "\"," + "\"private_key\":\"-----BEGIN PRIVATE KEY-----\\n" + encodedPrivateKey + "\\n-----END PRIVATE KEY-----\\n\"," + "\"client_email\":\"" + clientName + "\"," + - "\"client_id\":\"id_" + clientName + "\"," + - "\"auth_uri\":\"https://accounts.google.com/o/oauth2/auth\"," + - "\"token_uri\":\"https://accounts.google.com/o/oauth2/token\"," + - "\"auth_provider_x509_cert_url\":\"https://www.googleapis.com/oauth2/v1/certs\"," + - "\"client_x509_cert_url\":\"https://www.googleapis.com/robot/v1/metadata/x509/" + - clientName + - "%40appspot.gserviceaccount.com\"}"; - + "\"client_id\":\"id_" + clientName + "\"" + + "}"; return Tuple.tuple(credentialBuilder.build(), serviceAccount.getBytes(StandardCharsets.UTF_8)); } @@ -182,14 +212,16 @@ private static TimeValue randomTimeout() { return randomFrom(TimeValue.MINUS_ONE, TimeValue.ZERO, TimeValue.parseTimeValue(randomPositiveTimeValue(), "test")); } - private static void assertGoogleCredential(final GoogleCredential expected, final GoogleCredential actual) { + private static void assertGoogleCredential(ServiceAccountCredentials expected, ServiceAccountCredentials actual) { if 
(expected != null) { assertEquals(expected.getServiceAccountUser(), actual.getServiceAccountUser()); - assertEquals(expected.getServiceAccountId(), actual.getServiceAccountId()); - assertEquals(expected.getServiceAccountProjectId(), actual.getServiceAccountProjectId()); - assertEquals(expected.getServiceAccountScopesAsString(), actual.getServiceAccountScopesAsString()); - assertEquals(expected.getServiceAccountPrivateKey(), actual.getServiceAccountPrivateKey()); - assertEquals(expected.getServiceAccountPrivateKeyId(), actual.getServiceAccountPrivateKeyId()); + assertEquals(expected.getClientId(), actual.getClientId()); + assertEquals(expected.getClientEmail(), actual.getClientEmail()); + assertEquals(expected.getAccount(), actual.getAccount()); + assertEquals(expected.getProjectId(), actual.getProjectId()); + assertEquals(expected.getScopes(), actual.getScopes()); + assertEquals(expected.getPrivateKey(), actual.getPrivateKey()); + assertEquals(expected.getPrivateKeyId(), actual.getPrivateKeyId()); } else { assertNull(actual); } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java index 44897819fd9e3..a33ae90c549bc 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java @@ -19,79 +19,65 @@ package org.elasticsearch.repositories.gcs; -import com.google.api.client.googleapis.auth.oauth2.GoogleCredential; -import com.google.api.client.http.GenericUrl; -import com.google.api.client.http.HttpIOExceptionHandler; -import com.google.api.client.http.HttpRequest; -import com.google.api.client.http.HttpRequestFactory; -import com.google.api.client.http.HttpRequestInitializer; -import com.google.api.client.http.HttpResponse; -import com.google.api.client.http.HttpUnsuccessfulResponseHandler; -import com.google.api.client.testing.http.MockHttpTransport; +import com.google.auth.Credentials; +import com.google.cloud.http.HttpTransportOptions; +import com.google.cloud.storage.Storage; + +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.env.Environment; import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; +import java.util.Collections; +import java.util.Locale; -import java.io.IOException; - -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyBoolean; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class GoogleCloudStorageServiceTests extends ESTestCase { - /** - * Test that the {@link GoogleCloudStorageService.DefaultHttpRequestInitializer} attaches new instances - * of {@link HttpIOExceptionHandler} and {@link HttpUnsuccessfulResponseHandler} for every HTTP requests. 
- */ - public void testDefaultHttpRequestInitializer() throws IOException { + public void testClientInitializer() throws Exception { + final String clientName = randomAlphaOfLength(4).toLowerCase(Locale.ROOT); final Environment environment = mock(Environment.class); - when(environment.settings()).thenReturn(Settings.EMPTY); - - final GoogleCredential credential = mock(GoogleCredential.class); - when(credential.handleResponse(any(HttpRequest.class), any(HttpResponse.class), anyBoolean())).thenReturn(false); - - final TimeValue readTimeout = TimeValue.timeValueSeconds(randomIntBetween(1, 120)); - final TimeValue connectTimeout = TimeValue.timeValueSeconds(randomIntBetween(1, 120)); - final String endpoint = randomBoolean() ? randomAlphaOfLength(10) : null; - final String applicationName = randomBoolean() ? randomAlphaOfLength(10) : null; - - final GoogleCloudStorageClientSettings clientSettings = - new GoogleCloudStorageClientSettings(credential, endpoint, connectTimeout, readTimeout, applicationName); - - final HttpRequestInitializer initializer = GoogleCloudStorageService.createRequestInitializer(clientSettings); - final HttpRequestFactory requestFactory = new MockHttpTransport().createRequestFactory(initializer); - - final HttpRequest request1 = requestFactory.buildGetRequest(new GenericUrl()); - assertEquals((int) connectTimeout.millis(), request1.getConnectTimeout()); - assertEquals((int) readTimeout.millis(), request1.getReadTimeout()); - assertSame(credential, request1.getInterceptor()); - assertNotNull(request1.getIOExceptionHandler()); - assertNotNull(request1.getUnsuccessfulResponseHandler()); - - final HttpRequest request2 = requestFactory.buildGetRequest(new GenericUrl()); - assertEquals((int) connectTimeout.millis(), request2.getConnectTimeout()); - assertEquals((int) readTimeout.millis(), request2.getReadTimeout()); - assertSame(request1.getInterceptor(), request2.getInterceptor()); - assertNotNull(request2.getIOExceptionHandler()); - assertNotSame(request1.getIOExceptionHandler(), request2.getIOExceptionHandler()); - assertNotNull(request2.getUnsuccessfulResponseHandler()); - assertNotSame(request1.getUnsuccessfulResponseHandler(), request2.getUnsuccessfulResponseHandler()); - - request1.getUnsuccessfulResponseHandler().handleResponse(null, null, false); - verify(credential, times(1)).handleResponse(any(HttpRequest.class), any(HttpResponse.class), anyBoolean()); - - request2.getUnsuccessfulResponseHandler().handleResponse(null, null, false); - verify(credential, times(2)).handleResponse(any(HttpRequest.class), any(HttpResponse.class), anyBoolean()); + final TimeValue connectTimeValue = TimeValue.timeValueNanos(randomIntBetween(0, 2000000)); + final TimeValue readTimeValue = TimeValue.timeValueNanos(randomIntBetween(0, 2000000)); + final String applicationName = randomAlphaOfLength(4); + final String hostName = randomFrom("http://", "https://") + randomAlphaOfLength(4) + ":" + randomIntBetween(1, 65535); + final String projectIdName = randomAlphaOfLength(4); + final Settings settings = Settings.builder() + .put(GoogleCloudStorageClientSettings.CONNECT_TIMEOUT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), + connectTimeValue.getStringRep()) + .put(GoogleCloudStorageClientSettings.READ_TIMEOUT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), + readTimeValue.getStringRep()) + .put(GoogleCloudStorageClientSettings.APPLICATION_NAME_SETTING.getConcreteSettingForNamespace(clientName).getKey(), + applicationName) + 
.put(GoogleCloudStorageClientSettings.ENDPOINT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), hostName) + .put(GoogleCloudStorageClientSettings.PROJECT_ID_SETTING.getConcreteSettingForNamespace(clientName).getKey(), projectIdName) + .build(); + when(environment.settings()).thenReturn(settings); + final GoogleCloudStorageClientSettings clientSettings = GoogleCloudStorageClientSettings.getClientSettings(settings, clientName); + final GoogleCloudStorageService service = new GoogleCloudStorageService(environment, + Collections.singletonMap(clientName, clientSettings)); + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> service.createClient("another_client")); + assertThat(e.getMessage(), Matchers.startsWith("Unknown client name")); + assertSettingDeprecationsAndWarnings( + new Setting[] { GoogleCloudStorageClientSettings.APPLICATION_NAME_SETTING.getConcreteSettingForNamespace(clientName) }); + final Storage storage = service.createClient(clientName); + assertThat(storage.getOptions().getApplicationName(), Matchers.containsString(applicationName)); + assertThat(storage.getOptions().getHost(), Matchers.is(hostName)); + assertThat(storage.getOptions().getProjectId(), Matchers.is(projectIdName)); + assertThat(storage.getOptions().getTransportOptions(), Matchers.instanceOf(HttpTransportOptions.class)); + assertThat(((HttpTransportOptions) storage.getOptions().getTransportOptions()).getConnectTimeout(), + Matchers.is((int) connectTimeValue.millis())); + assertThat(((HttpTransportOptions) storage.getOptions().getTransportOptions()).getReadTimeout(), + Matchers.is((int) readTimeValue.millis())); + assertThat(storage.getOptions().getCredentials(), Matchers.nullValue(Credentials.class)); } public void testToTimeout() { - assertNull(GoogleCloudStorageService.toTimeout(null)); - assertNull(GoogleCloudStorageService.toTimeout(TimeValue.ZERO)); + assertEquals(-1, GoogleCloudStorageService.toTimeout(null).intValue()); + assertEquals(-1, GoogleCloudStorageService.toTimeout(TimeValue.ZERO).intValue()); assertEquals(0, GoogleCloudStorageService.toTimeout(TimeValue.MINUS_ONE).intValue()); } } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java index 325cea132beb6..2b52b7a32a9cc 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java @@ -19,289 +19,478 @@ package org.elasticsearch.repositories.gcs; -import com.google.api.client.googleapis.json.GoogleJsonError; -import com.google.api.client.googleapis.json.GoogleJsonResponseException; -import com.google.api.client.http.AbstractInputStreamContent; -import com.google.api.client.http.HttpHeaders; -import com.google.api.client.http.HttpMethods; -import com.google.api.client.http.HttpRequest; -import com.google.api.client.http.HttpRequestInitializer; -import com.google.api.client.http.HttpResponseException; -import com.google.api.client.http.LowLevelHttpRequest; -import com.google.api.client.http.LowLevelHttpResponse; -import com.google.api.client.http.MultipartContent; -import com.google.api.client.json.JsonFactory; -import com.google.api.client.testing.http.MockHttpTransport; -import com.google.api.client.testing.http.MockLowLevelHttpRequest; -import com.google.api.client.testing.http.MockLowLevelHttpResponse; -import 
com.google.api.services.storage.Storage; -import com.google.api.services.storage.model.Bucket; -import com.google.api.services.storage.model.StorageObject; -import org.elasticsearch.common.io.Streams; -import org.elasticsearch.rest.RestStatus; +import com.google.api.gax.paging.Page; +import com.google.cloud.Policy; +import com.google.cloud.ReadChannel; +import com.google.cloud.RestorableState; +import com.google.cloud.WriteChannel; +import com.google.cloud.storage.Acl; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.CopyWriter; +import com.google.cloud.storage.ServiceAccount; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageBatch; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.StorageRpcOptionUtils; +import com.google.cloud.storage.StorageTestUtils; + +import org.elasticsearch.core.internal.io.IOUtils; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; -import java.math.BigInteger; +import java.net.URL; +import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.nio.channels.ReadableByteChannel; +import java.nio.channels.WritableByteChannel; import java.util.ArrayList; +import java.util.List; +import java.util.Objects; import java.util.concurrent.ConcurrentMap; - -import static org.mockito.Mockito.mock; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; /** * {@link MockStorage} mocks a {@link Storage} client by storing all the blobs * in a given concurrent map. */ -class MockStorage extends Storage { - - /* A custom HTTP header name used to propagate the name of the blobs to delete in batch requests */ - private static final String DELETION_HEADER = "x-blob-to-delete"; +class MockStorage implements Storage { private final String bucketName; private final ConcurrentMap blobs; MockStorage(final String bucket, final ConcurrentMap blobs) { - super(new MockedHttpTransport(blobs), mock(JsonFactory.class), mock(HttpRequestInitializer.class)); - this.bucketName = bucket; - this.blobs = blobs; + this.bucketName = Objects.requireNonNull(bucket); + this.blobs = Objects.requireNonNull(blobs); } @Override - public Buckets buckets() { - return new MockBuckets(); + public Bucket get(String bucket, BucketGetOption... 
options) { + if (bucketName.equals(bucket)) { + return StorageTestUtils.createBucket(this, bucketName); + } else { + return null; + } } @Override - public Objects objects() { - return new MockObjects(); + public Blob get(BlobId blob) { + if (bucketName.equals(blob.getBucket())) { + final byte[] bytes = blobs.get(blob.getName()); + if (bytes != null) { + return StorageTestUtils.createBlob(this, bucketName, blob.getName(), bytes.length); + } + } + return null; } - class MockBuckets extends Buckets { + @Override + public boolean delete(BlobId blob) { + if (bucketName.equals(blob.getBucket()) && blobs.containsKey(blob.getName())) { + return blobs.remove(blob.getName()) != null; + } + return false; + } - @Override - public Get get(String getBucket) { - return new Get(getBucket) { - @Override - public Bucket execute() { - if (bucketName.equals(getBucket())) { - Bucket bucket = new Bucket(); - bucket.setId(bucketName); - return bucket; - } else { - return null; - } - } - }; + @Override + public List delete(Iterable blobIds) { + final List ans = new ArrayList<>(); + for (final BlobId blobId : blobIds) { + ans.add(delete(blobId)); } + return ans; } - class MockObjects extends Objects { + @Override + public Blob create(BlobInfo blobInfo, byte[] content, BlobTargetOption... options) { + if (bucketName.equals(blobInfo.getBucket()) == false) { + throw new StorageException(404, "Bucket not found"); + } + blobs.put(blobInfo.getName(), content); + return get(BlobId.of(blobInfo.getBucket(), blobInfo.getName())); + } + + @Override + public CopyWriter copy(CopyRequest copyRequest) { + if (bucketName.equals(copyRequest.getSource().getBucket()) == false) { + throw new StorageException(404, "Source bucket not found"); + } + if (bucketName.equals(copyRequest.getTarget().getBucket()) == false) { + throw new StorageException(404, "Target bucket not found"); + } + + final byte[] bytes = blobs.get(copyRequest.getSource().getName()); + if (bytes == null) { + throw new StorageException(404, "Source blob does not exist"); + } + blobs.put(copyRequest.getTarget().getName(), bytes); + return StorageRpcOptionUtils + .createCopyWriter(get(BlobId.of(copyRequest.getTarget().getBucket(), copyRequest.getTarget().getName()))); + } + + @Override + public Page list(String bucket, BlobListOption... options) { + if (bucketName.equals(bucket) == false) { + throw new StorageException(404, "Bucket not found"); + } + final Storage storage = this; + final String prefix = StorageRpcOptionUtils.getPrefix(options); - @Override - public Get get(String getBucket, String getObject) { - return new Get(getBucket, getObject) { + return new Page() { + @Override + public boolean hasNextPage() { + return false; + } + + @Override + public String getNextPageToken() { + return null; + } + + @Override + public Page getNextPage() { + throw new UnsupportedOperationException(); + } + + @Override + public Iterable iterateAll() { + return blobs.entrySet().stream() + .filter(blob -> ((prefix == null) || blob.getKey().startsWith(prefix))) + .map(blob -> StorageTestUtils.createBlob(storage, bucketName, blob.getKey(), blob.getValue().length)) + .collect(Collectors.toList()); + } + + @Override + public Iterable getValues() { + throw new UnsupportedOperationException(); + } + }; + } + + @Override + public ReadChannel reader(BlobId blob, BlobSourceOption... 
options) { + if (bucketName.equals(blob.getBucket())) { + final byte[] bytes = blobs.get(blob.getName()); + final ReadableByteChannel readableByteChannel = Channels.newChannel(new ByteArrayInputStream(bytes)); + return new ReadChannel() { @Override - public StorageObject execute() throws IOException { - if (bucketName.equals(getBucket()) == false) { - throw newBucketNotFoundException(getBucket()); - } - if (blobs.containsKey(getObject()) == false) { - throw newObjectNotFoundException(getObject()); - } - - StorageObject storageObject = new StorageObject(); - storageObject.setId(getObject()); - return storageObject; + public void close() { + IOUtils.closeWhileHandlingException(readableByteChannel); } @Override - public InputStream executeMediaAsInputStream() throws IOException { - if (bucketName.equals(getBucket()) == false) { - throw newBucketNotFoundException(getBucket()); - } - if (blobs.containsKey(getObject()) == false) { - throw newObjectNotFoundException(getObject()); - } - return new ByteArrayInputStream(blobs.get(getObject())); + public void seek(long position) throws IOException { + throw new UnsupportedOperationException(); } - }; - } - @Override - public Insert insert(String insertBucket, StorageObject insertObject, AbstractInputStreamContent insertStream) { - return new Insert(insertBucket, insertObject) { @Override - public StorageObject execute() throws IOException { - if (bucketName.equals(getBucket()) == false) { - throw newBucketNotFoundException(getBucket()); - } - - ByteArrayOutputStream out = new ByteArrayOutputStream(); - Streams.copy(insertStream.getInputStream(), out); - blobs.put(getName(), out.toByteArray()); - return null; + public void setChunkSize(int chunkSize) { + throw new UnsupportedOperationException(); + } + + @Override + public RestorableState capture() { + throw new UnsupportedOperationException(); + } + + @Override + public int read(ByteBuffer dst) throws IOException { + return readableByteChannel.read(dst); } - }; - } - @Override - public List list(String listBucket) { - return new List(listBucket) { @Override - public com.google.api.services.storage.model.Objects execute() throws IOException { - if (bucketName.equals(getBucket()) == false) { - throw newBucketNotFoundException(getBucket()); - } - - final com.google.api.services.storage.model.Objects objects = new com.google.api.services.storage.model.Objects(); - - final java.util.List storageObjects = new ArrayList<>(); - for (Entry blob : blobs.entrySet()) { - if (getPrefix() == null || blob.getKey().startsWith(getPrefix())) { - StorageObject storageObject = new StorageObject(); - storageObject.setId(blob.getKey()); - storageObject.setName(blob.getKey()); - storageObject.setSize(BigInteger.valueOf((long) blob.getValue().length)); - storageObjects.add(storageObject); - } - } - - objects.setItems(storageObjects); - return objects; + public boolean isOpen() { + return readableByteChannel.isOpen(); } }; } + return null; + } + + @Override + public WriteChannel writer(BlobInfo blobInfo, BlobWriteOption... 
options) { + if (bucketName.equals(blobInfo.getBucket())) { + final ByteArrayOutputStream output = new ByteArrayOutputStream(); + return new WriteChannel() { + + final WritableByteChannel writableByteChannel = Channels.newChannel(output); - @Override - public Delete delete(String deleteBucket, String deleteObject) { - return new Delete(deleteBucket, deleteObject) { @Override - public Void execute() throws IOException { - if (bucketName.equals(getBucket()) == false) { - throw newBucketNotFoundException(getBucket()); - } + public void setChunkSize(int chunkSize) { + throw new UnsupportedOperationException(); + } - if (blobs.containsKey(getObject()) == false) { - throw newObjectNotFoundException(getObject()); - } + @Override + public RestorableState capture() { + throw new UnsupportedOperationException(); + } - blobs.remove(getObject()); - return null; + @Override + public int write(ByteBuffer src) throws IOException { + return writableByteChannel.write(src); } @Override - public HttpRequest buildHttpRequest() throws IOException { - HttpRequest httpRequest = super.buildHttpRequest(); - httpRequest.getHeaders().put(DELETION_HEADER, getObject()); - return httpRequest; + public boolean isOpen() { + return writableByteChannel.isOpen(); } - }; - } - @Override - public Copy copy(String srcBucket, String srcObject, String destBucket, String destObject, StorageObject content) { - return new Copy(srcBucket, srcObject, destBucket, destObject, content) { @Override - public StorageObject execute() throws IOException { - if (bucketName.equals(getSourceBucket()) == false) { - throw newBucketNotFoundException(getSourceBucket()); - } - if (bucketName.equals(getDestinationBucket()) == false) { - throw newBucketNotFoundException(getDestinationBucket()); - } - - final byte[] bytes = blobs.get(getSourceObject()); - if (bytes == null) { - throw newObjectNotFoundException(getSourceObject()); - } - blobs.put(getDestinationObject(), bytes); - - StorageObject storageObject = new StorageObject(); - storageObject.setId(getDestinationObject()); - return storageObject; + public void close() throws IOException { + IOUtils.closeWhileHandlingException(writableByteChannel); + blobs.put(blobInfo.getName(), output.toByteArray()); } }; } + return null; } - private static GoogleJsonResponseException newBucketNotFoundException(final String bucket) { - HttpResponseException.Builder builder = new HttpResponseException.Builder(404, "Bucket not found: " + bucket, new HttpHeaders()); - return new GoogleJsonResponseException(builder, new GoogleJsonError()); + // Everything below this line is not implemented. + + @Override + public Bucket create(BucketInfo bucketInfo, BucketTargetOption... options) { + return null; } - private static GoogleJsonResponseException newObjectNotFoundException(final String object) { - HttpResponseException.Builder builder = new HttpResponseException.Builder(404, "Object not found: " + object, new HttpHeaders()); - return new GoogleJsonResponseException(builder, new GoogleJsonError()); + @Override + public Blob create(BlobInfo blobInfo, BlobTargetOption... options) { + return null; } - /** - * {@link MockedHttpTransport} extends the existing testing transport to analyze the content - * of {@link com.google.api.client.googleapis.batch.BatchRequest} and delete the appropriates - * blobs. We use this because {@link Storage#batch()} is final and there is no other way to - * extend batch requests for testing purposes. 
- */ - static class MockedHttpTransport extends MockHttpTransport { + @Override + public Blob create(BlobInfo blobInfo, InputStream content, BlobWriteOption... options) { + return null; + } - private final ConcurrentMap blobs; + @Override + public Blob get(String bucket, String blob, BlobGetOption... options) { + return null; + } - MockedHttpTransport(final ConcurrentMap blobs) { - this.blobs = blobs; - } + @Override + public Blob get(BlobId blob, BlobGetOption... options) { + return null; + } - @Override - public LowLevelHttpRequest buildRequest(final String method, final String url) throws IOException { - // We analyze the content of the Batch request to detect our custom HTTP header, - // and extract from it the name of the blob to delete. Then we reply a simple - // batch response so that the client parser is happy. - // - // See https://cloud.google.com/storage/docs/json_api/v1/how-tos/batch for the - // format of the batch request body. - if (HttpMethods.POST.equals(method) && url.endsWith("/batch")) { - return new MockLowLevelHttpRequest() { - @Override - public LowLevelHttpResponse execute() throws IOException { - final String contentType = new MultipartContent().getType(); - - final StringBuilder builder = new StringBuilder(); - try (ByteArrayOutputStream out = new ByteArrayOutputStream()) { - getStreamingContent().writeTo(out); - - Streams.readAllLines(new ByteArrayInputStream(out.toByteArray()), line -> { - if (line != null && line.startsWith(DELETION_HEADER)) { - builder.append("--__END_OF_PART__\r\n"); - builder.append("Content-Type: application/http").append("\r\n"); - builder.append("\r\n"); - builder.append("HTTP/1.1 "); - - final String blobName = line.substring(line.indexOf(':') + 1).trim(); - if (blobs.containsKey(blobName)) { - builder.append(RestStatus.OK.getStatus()); - blobs.remove(blobName); - } else { - builder.append(RestStatus.NOT_FOUND.getStatus()); - } - builder.append("\r\n"); - builder.append("Content-Type: application/json; charset=UTF-8").append("\r\n"); - builder.append("Content-Length: 0").append("\r\n"); - builder.append("\r\n"); - } - }); - builder.append("\r\n"); - builder.append("--__END_OF_PART__--"); - } - - MockLowLevelHttpResponse response = new MockLowLevelHttpResponse(); - response.setStatusCode(200); - response.setContent(builder.toString()); - response.setContentType(contentType); - return response; - } - }; - } else { - return super.buildRequest(method, url); - } - } + @Override + public Page list(BucketListOption... options) { + return null; + } + + @Override + public Bucket update(BucketInfo bucketInfo, BucketTargetOption... options) { + return null; + } + + @Override + public Blob update(BlobInfo blobInfo, BlobTargetOption... options) { + return null; + } + + @Override + public Blob update(BlobInfo blobInfo) { + return null; + } + + @Override + public boolean delete(String bucket, BucketSourceOption... options) { + return false; + } + + @Override + public boolean delete(String bucket, String blob, BlobSourceOption... options) { + return false; + } + + @Override + public boolean delete(BlobId blob, BlobSourceOption... options) { + return false; + } + + @Override + public Blob compose(ComposeRequest composeRequest) { + return null; + } + + @Override + public byte[] readAllBytes(String bucket, String blob, BlobSourceOption... options) { + return new byte[0]; + } + + @Override + public byte[] readAllBytes(BlobId blob, BlobSourceOption... 
options) { + return new byte[0]; + } + + @Override + public StorageBatch batch() { + return null; + } + + @Override + public ReadChannel reader(String bucket, String blob, BlobSourceOption... options) { + return null; + } + + @Override + public URL signUrl(BlobInfo blobInfo, long duration, TimeUnit unit, SignUrlOption... options) { + return null; + } + + @Override + public List get(BlobId... blobIds) { + return null; + } + + @Override + public List get(Iterable blobIds) { + return null; + } + + @Override + public List update(BlobInfo... blobInfos) { + return null; + } + + @Override + public List update(Iterable blobInfos) { + return null; + } + + @Override + public List delete(BlobId... blobIds) { + return null; + } + + @Override + public Acl getAcl(String bucket, Acl.Entity entity, BucketSourceOption... options) { + return null; + } + + @Override + public Acl getAcl(String bucket, Acl.Entity entity) { + return null; + } + + @Override + public boolean deleteAcl(String bucket, Acl.Entity entity, BucketSourceOption... options) { + return false; + } + + @Override + public boolean deleteAcl(String bucket, Acl.Entity entity) { + return false; + } + + @Override + public Acl createAcl(String bucket, Acl acl, BucketSourceOption... options) { + return null; + } + + @Override + public Acl createAcl(String bucket, Acl acl) { + return null; + } + + @Override + public Acl updateAcl(String bucket, Acl acl, BucketSourceOption... options) { + return null; + } + + @Override + public Acl updateAcl(String bucket, Acl acl) { + return null; + } + + @Override + public List listAcls(String bucket, BucketSourceOption... options) { + return null; + } + + @Override + public List listAcls(String bucket) { + return null; + } + + @Override + public Acl getDefaultAcl(String bucket, Acl.Entity entity) { + return null; + } + + @Override + public boolean deleteDefaultAcl(String bucket, Acl.Entity entity) { + return false; + } + + @Override + public Acl createDefaultAcl(String bucket, Acl acl) { + return null; + } + + @Override + public Acl updateDefaultAcl(String bucket, Acl acl) { + return null; + } + + @Override + public List listDefaultAcls(String bucket) { + return null; + } + + @Override + public Acl getAcl(BlobId blob, Acl.Entity entity) { + return null; + } + + @Override + public boolean deleteAcl(BlobId blob, Acl.Entity entity) { + return false; + } + + @Override + public Acl createAcl(BlobId blob, Acl acl) { + return null; + } + + @Override + public Acl updateAcl(BlobId blob, Acl acl) { + return null; + } + + @Override + public List listAcls(BlobId blob) { + return null; + } + + @Override + public Policy getIamPolicy(String bucket, BucketSourceOption... options) { + return null; + } + + @Override + public Policy setIamPolicy(String bucket, Policy policy, BucketSourceOption... options) { + return null; + } + + @Override + public List testIamPermissions(String bucket, List permissions, BucketSourceOption... 
options) { + return null; + } + + @Override + public ServiceAccount getServiceAccount(String projectId) { + return null; + } + + @Override + public StorageOptions getOptions() { + return null; } } diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java index 010c4b92c21a0..e31495efc0eef 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java @@ -90,6 +90,8 @@ public List> getSettings() { S3ClientSettings.PROXY_PASSWORD_SETTING, S3ClientSettings.READ_TIMEOUT_SETTING, S3ClientSettings.MAX_RETRIES_SETTING, - S3ClientSettings.USE_THROTTLE_RETRIES_SETTING); + S3ClientSettings.USE_THROTTLE_RETRIES_SETTING, + S3Repository.ACCESS_KEY_SETTING, + S3Repository.SECRET_KEY_SETTING); } } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java index e3e89c41514de..e599f84b411e4 100644 --- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -21,7 +21,10 @@ import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.model.CannedAccessControlList; import com.amazonaws.services.s3.model.StorageClass; + +import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -29,6 +32,12 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.Repository; import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase; +import org.elasticsearch.rest.AbstractRestChannel; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.action.admin.cluster.RestGetRepositoriesAction; +import org.elasticsearch.test.rest.FakeRestRequest; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -38,9 +47,14 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReference; import static java.util.Collections.emptyMap; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.not; +import static org.mockito.Mockito.mock; public class S3BlobStoreRepositoryTests extends ESBlobStoreRepositoryIntegTestCase { @@ -81,7 +95,9 @@ protected void createTestRepository(final String name) { .put(S3Repository.BUFFER_SIZE_SETTING.getKey(), bufferSize) .put(S3Repository.SERVER_SIDE_ENCRYPTION_SETTING.getKey(), serverSideEncryption) .put(S3Repository.CANNED_ACL_SETTING.getKey(), cannedACL) - .put(S3Repository.STORAGE_CLASS_SETTING.getKey(), storageClass))); + .put(S3Repository.STORAGE_CLASS_SETTING.getKey(), storageClass) + 
.put(S3Repository.ACCESS_KEY_SETTING.getKey(), "not_used_but_this_is_a_secret") + .put(S3Repository.SECRET_KEY_SETTING.getKey(), "not_used_but_this_is_a_secret"))); } @Override @@ -106,4 +122,32 @@ public synchronized AmazonS3 client(final Settings repositorySettings) { })); } } + + public void testInsecureRepositoryCredentials() throws Exception { + final String repositoryName = "testInsecureRepositoryCredentials"; + createTestRepository(repositoryName); + final NodeClient nodeClient = internalCluster().getInstance(NodeClient.class); + final RestGetRepositoriesAction getRepoAction = new RestGetRepositoriesAction(Settings.EMPTY, mock(RestController.class), + internalCluster().getInstance(SettingsFilter.class)); + final RestRequest getRepoRequest = new FakeRestRequest(); + getRepoRequest.params().put("repository", repositoryName); + final CountDownLatch getRepoLatch = new CountDownLatch(1); + final AtomicReference getRepoError = new AtomicReference<>(); + getRepoAction.handleRequest(getRepoRequest, new AbstractRestChannel(getRepoRequest, true) { + @Override + public void sendResponse(RestResponse response) { + try { + assertThat(response.content().utf8ToString(), not(containsString("not_used_but_this_is_a_secret"))); + } catch (final AssertionError ex) { + getRepoError.set(ex); + } + getRepoLatch.countDown(); + } + }, nodeClient); + getRepoLatch.await(); + if (getRepoError.get() != null) { + throw getRepoError.get(); + } + } + } diff --git a/plugins/transport-nio/build.gradle b/plugins/transport-nio/build.gradle index 60fef4b34241d..e278ebf47983e 100644 --- a/plugins/transport-nio/build.gradle +++ b/plugins/transport-nio/build.gradle @@ -29,4 +29,115 @@ compileTestJava.options.compilerArgs << "-Xlint:-rawtypes,-unchecked" dependencies { compile "org.elasticsearch:elasticsearch-nio:${version}" -} \ No newline at end of file + + // network stack + compile "io.netty:netty-buffer:4.1.16.Final" + compile "io.netty:netty-codec:4.1.16.Final" + compile "io.netty:netty-codec-http:4.1.16.Final" + compile "io.netty:netty-common:4.1.16.Final" + compile "io.netty:netty-handler:4.1.16.Final" + compile "io.netty:netty-resolver:4.1.16.Final" + compile "io.netty:netty-transport:4.1.16.Final" +} + +thirdPartyAudit.excludes = [ + // classes are missing + + // from io.netty.handler.codec.protobuf.ProtobufDecoder (netty) + 'com.google.protobuf.ExtensionRegistry', + 'com.google.protobuf.MessageLite$Builder', + 'com.google.protobuf.MessageLite', + 'com.google.protobuf.Parser', + + // from io.netty.logging.CommonsLoggerFactory (netty) + 'org.apache.commons.logging.Log', + 'org.apache.commons.logging.LogFactory', + + // from io.netty.handler.ssl.OpenSslEngine (netty) + 'io.netty.internal.tcnative.Buffer', + 'io.netty.internal.tcnative.Library', + 'io.netty.internal.tcnative.SSL', + 'io.netty.internal.tcnative.SSLContext', + + // from io.netty.handler.ssl.util.BouncyCastleSelfSignedCertGenerator (netty) + 'org.bouncycastle.asn1.x500.X500Name', + 'org.bouncycastle.cert.X509v3CertificateBuilder', + 'org.bouncycastle.cert.jcajce.JcaX509CertificateConverter', + 'org.bouncycastle.cert.jcajce.JcaX509v3CertificateBuilder', + 'org.bouncycastle.jce.provider.BouncyCastleProvider', + 'org.bouncycastle.operator.jcajce.JcaContentSignerBuilder', + + // from io.netty.handler.ssl.JettyNpnSslEngine (netty) + 'org.eclipse.jetty.npn.NextProtoNego$ClientProvider', + 'org.eclipse.jetty.npn.NextProtoNego$ServerProvider', + 'org.eclipse.jetty.npn.NextProtoNego', + + // from io.netty.handler.codec.marshalling.ChannelBufferByteInput (netty) 
+ 'org.jboss.marshalling.ByteInput', + + // from io.netty.handler.codec.marshalling.ChannelBufferByteOutput (netty) + 'org.jboss.marshalling.ByteOutput', + + // from io.netty.handler.codec.marshalling.CompatibleMarshallingEncoder (netty) + 'org.jboss.marshalling.Marshaller', + + // from io.netty.handler.codec.marshalling.ContextBoundUnmarshallerProvider (netty) + 'org.jboss.marshalling.MarshallerFactory', + 'org.jboss.marshalling.MarshallingConfiguration', + 'org.jboss.marshalling.Unmarshaller', + + // from io.netty.util.internal.logging.InternalLoggerFactory (netty) - it's optional + 'org.slf4j.Logger', + 'org.slf4j.LoggerFactory', + + 'com.google.protobuf.ExtensionRegistryLite', + 'com.google.protobuf.MessageLiteOrBuilder', + 'com.google.protobuf.nano.CodedOutputByteBufferNano', + 'com.google.protobuf.nano.MessageNano', + 'com.jcraft.jzlib.Deflater', + 'com.jcraft.jzlib.Inflater', + 'com.jcraft.jzlib.JZlib$WrapperType', + 'com.jcraft.jzlib.JZlib', + 'com.ning.compress.BufferRecycler', + 'com.ning.compress.lzf.ChunkDecoder', + 'com.ning.compress.lzf.ChunkEncoder', + 'com.ning.compress.lzf.LZFEncoder', + 'com.ning.compress.lzf.util.ChunkDecoderFactory', + 'com.ning.compress.lzf.util.ChunkEncoderFactory', + 'lzma.sdk.lzma.Encoder', + 'net.jpountz.lz4.LZ4Compressor', + 'net.jpountz.lz4.LZ4Factory', + 'net.jpountz.lz4.LZ4FastDecompressor', + 'net.jpountz.xxhash.StreamingXXHash32', + 'net.jpountz.xxhash.XXHashFactory', + 'io.netty.internal.tcnative.CertificateRequestedCallback', + 'io.netty.internal.tcnative.CertificateRequestedCallback$KeyMaterial', + 'io.netty.internal.tcnative.CertificateVerifier', + 'io.netty.internal.tcnative.SessionTicketKey', + 'io.netty.internal.tcnative.SniHostNameMatcher', + 'org.eclipse.jetty.alpn.ALPN$ClientProvider', + 'org.eclipse.jetty.alpn.ALPN$ServerProvider', + 'org.eclipse.jetty.alpn.ALPN', + + 'io.netty.handler.ssl.util.OpenJdkSelfSignedCertGenerator', + 'io.netty.util.internal.PlatformDependent0', + 'io.netty.util.internal.PlatformDependent0$1', + 'io.netty.util.internal.PlatformDependent0$2', + 'io.netty.util.internal.PlatformDependent0$3', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueConsumerNodeRef', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseLinkedQueueProducerNodeRef', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueColdProducerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueConsumerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.BaseMpscLinkedArrayQueueProducerFields', + 'io.netty.util.internal.shaded.org.jctools.queues.LinkedQueueNode', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueConsumerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerIndexField', + 'io.netty.util.internal.shaded.org.jctools.queues.MpscArrayQueueProducerLimitField', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeAccess', + 'io.netty.util.internal.shaded.org.jctools.util.UnsafeRefArrayAccess', + + 'org.conscrypt.AllocatedBuffer', + 'org.conscrypt.BufferAllocator', + 'org.conscrypt.Conscrypt$Engines', + 'org.conscrypt.HandshakeListener' +] \ No newline at end of file diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/ByteBufUtils.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/ByteBufUtils.java new file mode 100644 index 0000000000000..b4108b3e6c7d0 --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/ByteBufUtils.java @@ -0,0 
+1,252 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.http.nio; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.CompositeByteBuf; +import io.netty.buffer.Unpooled; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.BytesRefIterator; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.EOFException; +import java.io.IOException; +import java.io.OutputStream; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; + +class ByteBufUtils { + + /** + * Turns the given BytesReference into a ByteBuf. Note: the returned ByteBuf will reference the internal + * pages of the BytesReference. Don't free the bytes of reference before the ByteBuf goes out of scope. + */ + static ByteBuf toByteBuf(final BytesReference reference) { + if (reference.length() == 0) { + return Unpooled.EMPTY_BUFFER; + } + if (reference instanceof ByteBufBytesReference) { + return ((ByteBufBytesReference) reference).toByteBuf(); + } else { + final BytesRefIterator iterator = reference.iterator(); + // usually we have one, two, or three components from the header, the message, and a buffer + final List buffers = new ArrayList<>(3); + try { + BytesRef slice; + while ((slice = iterator.next()) != null) { + buffers.add(Unpooled.wrappedBuffer(slice.bytes, slice.offset, slice.length)); + } + final CompositeByteBuf composite = Unpooled.compositeBuffer(buffers.size()); + composite.addComponents(true, buffers); + return composite; + } catch (IOException ex) { + throw new AssertionError("no IO happens here", ex); + } + } + } + + static BytesReference toBytesReference(final ByteBuf buffer) { + return new ByteBufBytesReference(buffer, buffer.readableBytes()); + } + + private static class ByteBufBytesReference extends BytesReference { + + private final ByteBuf buffer; + private final int length; + private final int offset; + + ByteBufBytesReference(ByteBuf buffer, int length) { + this.buffer = buffer; + this.length = length; + this.offset = buffer.readerIndex(); + assert length <= buffer.readableBytes() : "length[" + length +"] > " + buffer.readableBytes(); + } + + @Override + public byte get(int index) { + return buffer.getByte(offset + index); + } + + @Override + public int length() { + return length; + } + + @Override + public BytesReference slice(int from, int length) { + return new ByteBufBytesReference(buffer.slice(offset + from, length), length); + } + + @Override + public StreamInput streamInput() { + return new ByteBufStreamInput(buffer.duplicate(), length); + } + + @Override + public void writeTo(OutputStream os) throws IOException { + buffer.getBytes(offset, os, length); + } + + ByteBuf toByteBuf() { + return 
buffer.duplicate(); + } + + @Override + public String utf8ToString() { + return buffer.toString(offset, length, StandardCharsets.UTF_8); + } + + @Override + public BytesRef toBytesRef() { + if (buffer.hasArray()) { + return new BytesRef(buffer.array(), buffer.arrayOffset() + offset, length); + } + final byte[] copy = new byte[length]; + buffer.getBytes(offset, copy); + return new BytesRef(copy); + } + + @Override + public long ramBytesUsed() { + return buffer.capacity(); + } + + } + + private static class ByteBufStreamInput extends StreamInput { + + private final ByteBuf buffer; + private final int endIndex; + + ByteBufStreamInput(ByteBuf buffer, int length) { + if (length > buffer.readableBytes()) { + throw new IndexOutOfBoundsException(); + } + this.buffer = buffer; + int startIndex = buffer.readerIndex(); + endIndex = startIndex + length; + buffer.markReaderIndex(); + } + + @Override + public BytesReference readBytesReference(int length) throws IOException { + // NOTE: It is unsafe to share a reference of the internal structure, so we + // use the default implementation which will copy the bytes. It is unsafe because + // a netty ByteBuf might be pooled which requires a manual release to prevent + // memory leaks. + return super.readBytesReference(length); + } + + @Override + public BytesRef readBytesRef(int length) throws IOException { + // NOTE: It is unsafe to share a reference of the internal structure, so we + // use the default implementation which will copy the bytes. It is unsafe because + // a netty ByteBuf might be pooled which requires a manual release to prevent + // memory leaks. + return super.readBytesRef(length); + } + + @Override + public int available() throws IOException { + return endIndex - buffer.readerIndex(); + } + + @Override + protected void ensureCanReadBytes(int length) throws EOFException { + int bytesAvailable = endIndex - buffer.readerIndex(); + if (bytesAvailable < length) { + throw new EOFException("tried to read: " + length + " bytes but only " + bytesAvailable + " remaining"); + } + } + + @Override + public void mark(int readlimit) { + buffer.markReaderIndex(); + } + + @Override + public boolean markSupported() { + return true; + } + + @Override + public int read() throws IOException { + if (available() == 0) { + return -1; + } + return buffer.readByte() & 0xff; + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + if (len == 0) { + return 0; + } + int available = available(); + if (available == 0) { + return -1; + } + + len = Math.min(available, len); + buffer.readBytes(b, off, len); + return len; + } + + @Override + public void reset() throws IOException { + buffer.resetReaderIndex(); + } + + @Override + public long skip(long n) throws IOException { + if (n > Integer.MAX_VALUE) { + return skipBytes(Integer.MAX_VALUE); + } else { + return skipBytes((int) n); + } + } + + public int skipBytes(int n) throws IOException { + int nBytes = Math.min(available(), n); + buffer.skipBytes(nBytes); + return nBytes; + } + + + @Override + public byte readByte() throws IOException { + return buffer.readByte(); + } + + @Override + public void readBytes(byte[] b, int offset, int len) throws IOException { + int read = read(b, offset, len); + if (read < len) { + throw new IndexOutOfBoundsException(); + } + } + + @Override + public void close() throws IOException { + // nothing to do here + } + } +} diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java 
b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java new file mode 100644 index 0000000000000..f1d18ddacbd13 --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpReadWriteHandler.java @@ -0,0 +1,225 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.nio; + +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelHandler; +import io.netty.handler.codec.ByteToMessageDecoder; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.DefaultHttpHeaders; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpContentCompressor; +import io.netty.handler.codec.http.HttpContentDecompressor; +import io.netty.handler.codec.http.HttpHeaders; +import io.netty.handler.codec.http.HttpObjectAggregator; +import io.netty.handler.codec.http.HttpRequestDecoder; +import io.netty.handler.codec.http.HttpResponseEncoder; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.http.HttpHandlingSettings; +import org.elasticsearch.nio.FlushOperation; +import org.elasticsearch.nio.InboundChannelBuffer; +import org.elasticsearch.nio.ReadWriteHandler; +import org.elasticsearch.nio.NioSocketChannel; +import org.elasticsearch.nio.SocketChannelContext; +import org.elasticsearch.nio.WriteOperation; +import org.elasticsearch.rest.RestRequest; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.function.BiConsumer; + +public class HttpReadWriteHandler implements ReadWriteHandler { + + private final NettyAdaptor adaptor; + private final NioSocketChannel nioChannel; + private final NioHttpServerTransport transport; + private final HttpHandlingSettings settings; + private final NamedXContentRegistry xContentRegistry; + private final ThreadContext threadContext; + + HttpReadWriteHandler(NioSocketChannel nioChannel, NioHttpServerTransport transport, HttpHandlingSettings settings, + NamedXContentRegistry xContentRegistry, ThreadContext threadContext) { + this.nioChannel = nioChannel; + this.transport = transport; + this.settings = settings; + this.xContentRegistry = xContentRegistry; + this.threadContext = threadContext; + + List handlers = new ArrayList<>(5); + HttpRequestDecoder decoder = new HttpRequestDecoder(settings.getMaxInitialLineLength(), settings.getMaxHeaderSize(), + settings.getMaxChunkSize()); + decoder.setCumulator(ByteToMessageDecoder.COMPOSITE_CUMULATOR); + handlers.add(decoder); + handlers.add(new HttpContentDecompressor()); + handlers.add(new 
HttpResponseEncoder()); + handlers.add(new HttpObjectAggregator(settings.getMaxContentLength())); + if (settings.isCompression()) { + handlers.add(new HttpContentCompressor(settings.getCompressionLevel())); + } + + adaptor = new NettyAdaptor(handlers.toArray(new ChannelHandler[0])); + adaptor.addCloseListener((v, e) -> nioChannel.close()); + } + + @Override + public int consumeReads(InboundChannelBuffer channelBuffer) throws IOException { + int bytesConsumed = adaptor.read(channelBuffer.sliceBuffersTo(channelBuffer.getIndex())); + Object message; + while ((message = adaptor.pollInboundMessage()) != null) { + handleRequest(message); + } + + return bytesConsumed; + } + + @Override + public WriteOperation createWriteOperation(SocketChannelContext context, Object message, BiConsumer listener) { + assert message instanceof FullHttpResponse : "This channel only supports messages that are of type: " + FullHttpResponse.class + + ". Found type: " + message.getClass() + "."; + return new HttpWriteOperation(context, (FullHttpResponse) message, listener); + } + + @Override + public List writeToBytes(WriteOperation writeOperation) { + adaptor.write(writeOperation); + return pollFlushOperations(); + } + + @Override + public List pollFlushOperations() { + ArrayList copiedOperations = new ArrayList<>(adaptor.getOutboundCount()); + FlushOperation flushOperation; + while ((flushOperation = adaptor.pollOutboundOperation()) != null) { + copiedOperations.add(flushOperation); + } + return copiedOperations; + } + + @Override + public void close() throws IOException { + try { + adaptor.close(); + } catch (Exception e) { + throw new IOException(e); + } + } + + private void handleRequest(Object msg) { + final FullHttpRequest request = (FullHttpRequest) msg; + + final FullHttpRequest copiedRequest = + new DefaultFullHttpRequest( + request.protocolVersion(), + request.method(), + request.uri(), + Unpooled.copiedBuffer(request.content()), + request.headers(), + request.trailingHeaders()); + + Exception badRequestCause = null; + + /* + * We want to create a REST request from the incoming request from Netty. However, creating this request could fail if there + * are incorrectly encoded parameters, or the Content-Type header is invalid. If one of these specific failures occurs, we + * attempt to create a REST request again without the input that caused the exception (e.g., we remove the Content-Type header, + * or skip decoding the parameters). Once we have a request in hand, we then dispatch the request as a bad request with the + * underlying exception that caused us to treat the request as bad. + */ + final NioHttpRequest httpRequest; + { + NioHttpRequest innerHttpRequest; + try { + innerHttpRequest = new NioHttpRequest(xContentRegistry, copiedRequest); + } catch (final RestRequest.ContentTypeHeaderException e) { + badRequestCause = e; + innerHttpRequest = requestWithoutContentTypeHeader(copiedRequest, badRequestCause); + } catch (final RestRequest.BadParameterException e) { + badRequestCause = e; + innerHttpRequest = requestWithoutParameters(copiedRequest); + } + httpRequest = innerHttpRequest; + } + + /* + * We now want to create a channel used to send the response on. However, creating this channel can fail if there are invalid + * parameter values for any of the filter_path, human, or pretty parameters. We detect these specific failures via an + * IllegalArgumentException from the channel constructor and then attempt to create a new channel that bypasses parsing of these + * parameter values. 
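+         * If the channel constructor does throw, we record the failure and build the channel from a copy of the
+         * request with all parameters removed, since no parameter values are needed to dispatch a bad request.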
+ */ + final NioHttpChannel channel; + { + NioHttpChannel innerChannel; + try { + innerChannel = new NioHttpChannel(nioChannel, transport.getBigArrays(), httpRequest, settings, threadContext); + } catch (final IllegalArgumentException e) { + if (badRequestCause == null) { + badRequestCause = e; + } else { + badRequestCause.addSuppressed(e); + } + final NioHttpRequest innerRequest = + new NioHttpRequest( + xContentRegistry, + Collections.emptyMap(), // we are going to dispatch the request as a bad request, drop all parameters + copiedRequest.uri(), + copiedRequest); + innerChannel = new NioHttpChannel(nioChannel, transport.getBigArrays(), innerRequest, settings, threadContext); + } + channel = innerChannel; + } + + if (request.decoderResult().isFailure()) { + transport.dispatchBadRequest(httpRequest, channel, request.decoderResult().cause()); + } else if (badRequestCause != null) { + transport.dispatchBadRequest(httpRequest, channel, badRequestCause); + } else { + transport.dispatchRequest(httpRequest, channel); + } + } + + private NioHttpRequest requestWithoutContentTypeHeader(final FullHttpRequest request, final Exception badRequestCause) { + final HttpHeaders headersWithoutContentTypeHeader = new DefaultHttpHeaders(); + headersWithoutContentTypeHeader.add(request.headers()); + headersWithoutContentTypeHeader.remove("Content-Type"); + final FullHttpRequest requestWithoutContentTypeHeader = + new DefaultFullHttpRequest( + request.protocolVersion(), + request.method(), + request.uri(), + request.content(), + headersWithoutContentTypeHeader, // remove the Content-Type header so as to not parse it again + request.trailingHeaders()); // Content-Type can not be a trailing header + try { + return new NioHttpRequest(xContentRegistry, requestWithoutContentTypeHeader); + } catch (final RestRequest.BadParameterException e) { + badRequestCause.addSuppressed(e); + return requestWithoutParameters(requestWithoutContentTypeHeader); + } + } + + private NioHttpRequest requestWithoutParameters(final FullHttpRequest request) { + // remove all parameters as at least one is incorrectly encoded + return new NioHttpRequest(xContentRegistry, Collections.emptyMap(), request.uri(), request); + } +} diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpWriteOperation.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpWriteOperation.java new file mode 100644 index 0000000000000..c838ae85e9d40 --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/HttpWriteOperation.java @@ -0,0 +1,54 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.http.nio; + +import io.netty.handler.codec.http.FullHttpResponse; +import org.elasticsearch.nio.SocketChannelContext; +import org.elasticsearch.nio.WriteOperation; + +import java.util.function.BiConsumer; + +public class HttpWriteOperation implements WriteOperation { + + private final SocketChannelContext channelContext; + private final FullHttpResponse response; + private final BiConsumer listener; + + HttpWriteOperation(SocketChannelContext channelContext, FullHttpResponse response, BiConsumer listener) { + this.channelContext = channelContext; + this.response = response; + this.listener = listener; + } + + @Override + public BiConsumer getListener() { + return listener; + } + + @Override + public SocketChannelContext getChannel() { + return channelContext; + } + + @Override + public FullHttpResponse getObject() { + return response; + } +} diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyAdaptor.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyAdaptor.java new file mode 100644 index 0000000000000..3344a31264121 --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyAdaptor.java @@ -0,0 +1,131 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.nio; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelOutboundHandlerAdapter; +import io.netty.channel.ChannelPromise; +import io.netty.channel.embedded.EmbeddedChannel; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.nio.FlushOperation; +import org.elasticsearch.nio.WriteOperation; + +import java.nio.ByteBuffer; +import java.util.LinkedList; +import java.util.function.BiConsumer; + +public class NettyAdaptor implements AutoCloseable { + + private final EmbeddedChannel nettyChannel; + private final LinkedList flushOperations = new LinkedList<>(); + + NettyAdaptor(ChannelHandler... handlers) { + nettyChannel = new EmbeddedChannel(); + nettyChannel.pipeline().addLast("write_captor", new ChannelOutboundHandlerAdapter() { + + @Override + public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) { + // This is a little tricky. The embedded channel will complete the promise once it writes the message + // to its outbound buffer. We do not want to complete the promise until the message is sent. So we + // intercept the promise and pass a different promise back to the rest of the pipeline. 
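+                // Concretely: the outgoing ByteBuf is wrapped in a FlushOperation together with a NettyListener view
+                // of the promise, so the promise only completes once the nio layer finishes (or fails) the flush,
+                // and the listener registered below releases the ByteBuf when that happens.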
+ + try { + ByteBuf message = (ByteBuf) msg; + promise.addListener((f) -> message.release()); + NettyListener listener; + if (promise instanceof NettyListener) { + listener = (NettyListener) promise; + } else { + listener = new NettyListener(promise); + } + flushOperations.add(new FlushOperation(message.nioBuffers(), listener)); + } catch (Exception e) { + promise.setFailure(e); + } + } + }); + nettyChannel.pipeline().addLast(handlers); + } + + @Override + public void close() throws Exception { + assert flushOperations.isEmpty() : "Should close outbound operations before calling close"; + + ChannelFuture closeFuture = nettyChannel.close(); + // This should be safe as we are not a real network channel + closeFuture.await(); + if (closeFuture.isSuccess() == false) { + Throwable cause = closeFuture.cause(); + ExceptionsHelper.dieOnError(cause); + throw (Exception) cause; + } + } + + public void addCloseListener(BiConsumer listener) { + nettyChannel.closeFuture().addListener(f -> { + if (f.isSuccess()) { + listener.accept(null, null); + } else { + final Throwable cause = f.cause(); + ExceptionsHelper.dieOnError(cause); + assert cause instanceof Exception; + listener.accept(null, (Exception) cause); + } + }); + } + + public int read(ByteBuffer[] buffers) { + ByteBuf byteBuf = Unpooled.wrappedBuffer(buffers); + int initialReaderIndex = byteBuf.readerIndex(); + nettyChannel.writeInbound(byteBuf); + return byteBuf.readerIndex() - initialReaderIndex; + } + + public Object pollInboundMessage() { + return nettyChannel.readInbound(); + } + + public void write(WriteOperation writeOperation) { + ChannelPromise channelPromise = nettyChannel.newPromise(); + channelPromise.addListener(f -> { + BiConsumer consumer = writeOperation.getListener(); + if (f.cause() == null) { + consumer.accept(null, null); + } else { + ExceptionsHelper.dieOnError(f.cause()); + consumer.accept(null, f.cause()); + } + }); + + nettyChannel.writeAndFlush(writeOperation.getObject(), new NettyListener(channelPromise)); + } + + public FlushOperation pollOutboundOperation() { + return flushOperations.pollFirst(); + } + + public int getOutboundCount() { + return flushOperations.size(); + } +} diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyListener.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyListener.java new file mode 100644 index 0000000000000..e806b0d23ce3a --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NettyListener.java @@ -0,0 +1,214 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.http.nio; + +import io.netty.channel.Channel; +import io.netty.channel.ChannelPromise; +import io.netty.util.concurrent.Future; +import io.netty.util.concurrent.GenericFutureListener; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.util.concurrent.FutureUtils; + +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.function.BiConsumer; + +/** + * This is an {@link BiConsumer} that interfaces with netty code. It wraps a netty promise and will + * complete that promise when accept is called. It delegates the normal promise methods to the underlying + * promise. + */ +public class NettyListener implements BiConsumer, ChannelPromise { + + private final ChannelPromise promise; + + NettyListener(ChannelPromise promise) { + this.promise = promise; + } + + @Override + public void accept(Void v, Throwable throwable) { + if (throwable == null) { + promise.setSuccess(); + } else { + promise.setFailure(throwable); + } + } + + @Override + public Channel channel() { + return promise.channel(); + } + + @Override + public ChannelPromise setSuccess(Void result) { + return promise.setSuccess(result); + } + + @Override + public boolean trySuccess(Void result) { + return promise.trySuccess(result); + } + + @Override + public ChannelPromise setSuccess() { + return promise.setSuccess(); + } + + @Override + public boolean trySuccess() { + return promise.trySuccess(); + } + + @Override + public ChannelPromise setFailure(Throwable cause) { + return promise.setFailure(cause); + } + + @Override + public boolean tryFailure(Throwable cause) { + return promise.tryFailure(cause); + } + + @Override + public boolean setUncancellable() { + return promise.setUncancellable(); + } + + @Override + public boolean isSuccess() { + return promise.isSuccess(); + } + + @Override + public boolean isCancellable() { + return promise.isCancellable(); + } + + @Override + public Throwable cause() { + return promise.cause(); + } + + @Override + public ChannelPromise addListener(GenericFutureListener> listener) { + return promise.addListener(listener); + } + + @Override + @SafeVarargs + @SuppressWarnings("varargs") + public final ChannelPromise addListeners(GenericFutureListener>... listeners) { + return promise.addListeners(listeners); + } + + @Override + public ChannelPromise removeListener(GenericFutureListener> listener) { + return promise.removeListener(listener); + } + + @Override + @SafeVarargs + @SuppressWarnings("varargs") + public final ChannelPromise removeListeners(GenericFutureListener>... 
listeners) { + return promise.removeListeners(listeners); + } + + @Override + public ChannelPromise sync() throws InterruptedException { + return promise.sync(); + } + + @Override + public ChannelPromise syncUninterruptibly() { + return promise.syncUninterruptibly(); + } + + @Override + public ChannelPromise await() throws InterruptedException { + return promise.await(); + } + + @Override + public ChannelPromise awaitUninterruptibly() { + return promise.awaitUninterruptibly(); + } + + @Override + public boolean await(long timeout, TimeUnit unit) throws InterruptedException { + return promise.await(timeout, unit); + } + + @Override + public boolean await(long timeoutMillis) throws InterruptedException { + return promise.await(timeoutMillis); + } + + @Override + public boolean awaitUninterruptibly(long timeout, TimeUnit unit) { + return promise.awaitUninterruptibly(timeout, unit); + } + + @Override + public boolean awaitUninterruptibly(long timeoutMillis) { + return promise.awaitUninterruptibly(timeoutMillis); + } + + @Override + public Void getNow() { + return promise.getNow(); + } + + @Override + public boolean cancel(boolean mayInterruptIfRunning) { + return FutureUtils.cancel(promise); + } + + @Override + public boolean isCancelled() { + return promise.isCancelled(); + } + + @Override + public boolean isDone() { + return promise.isDone(); + } + + @Override + public Void get() throws InterruptedException, ExecutionException { + return promise.get(); + } + + @Override + public Void get(long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { + return promise.get(timeout, unit); + } + + @Override + public boolean isVoid() { + return promise.isVoid(); + } + + @Override + public ChannelPromise unvoid() { + return promise.unvoid(); + } +} diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpChannel.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpChannel.java new file mode 100644 index 0000000000000..672c6d5abad0e --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpChannel.java @@ -0,0 +1,254 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.http.nio; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.handler.codec.http.DefaultFullHttpResponse; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaderValues; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpResponse; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpVersion; +import io.netty.handler.codec.http.cookie.Cookie; +import io.netty.handler.codec.http.cookie.ServerCookieDecoder; +import io.netty.handler.codec.http.cookie.ServerCookieEncoder; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.http.HttpHandlingSettings; +import org.elasticsearch.nio.NioSocketChannel; +import org.elasticsearch.rest.AbstractRestChannel; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.EnumMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +public class NioHttpChannel extends AbstractRestChannel { + + private final BigArrays bigArrays; + private final ThreadContext threadContext; + private final FullHttpRequest nettyRequest; + private final NioSocketChannel nioChannel; + private final boolean resetCookies; + + NioHttpChannel(NioSocketChannel nioChannel, BigArrays bigArrays, NioHttpRequest request, + HttpHandlingSettings settings, ThreadContext threadContext) { + super(request, settings.getDetailedErrorsEnabled()); + this.nioChannel = nioChannel; + this.bigArrays = bigArrays; + this.threadContext = threadContext; + this.nettyRequest = request.getRequest(); + this.resetCookies = settings.isResetCookies(); + } + + @Override + public void sendResponse(RestResponse response) { + // if the response object was created upstream, then use it; + // otherwise, create a new one + ByteBuf buffer = ByteBufUtils.toByteBuf(response.content()); + final FullHttpResponse resp; + if (HttpMethod.HEAD.equals(nettyRequest.method())) { + resp = newResponse(Unpooled.EMPTY_BUFFER); + } else { + resp = newResponse(buffer); + } + resp.setStatus(getStatus(response.status())); + + String opaque = nettyRequest.headers().get("X-Opaque-Id"); + if (opaque != null) { + setHeaderField(resp, "X-Opaque-Id", opaque); + } + + // Add all custom headers + addCustomHeaders(resp, response.getHeaders()); + addCustomHeaders(resp, threadContext.getResponseHeaders()); + + ArrayList toClose = new ArrayList<>(3); + + boolean success = false; + try { + // If our response doesn't specify a content-type header, set one + setHeaderField(resp, HttpHeaderNames.CONTENT_TYPE.toString(), response.contentType(), false); + // If our response has no content-length, calculate and set one + setHeaderField(resp, HttpHeaderNames.CONTENT_LENGTH.toString(), String.valueOf(buffer.readableBytes()), false); + + addCookies(resp); + + BytesReference content = response.content(); + if (content instanceof Releasable) { + toClose.add((Releasable) 
content); + } + BytesStreamOutput bytesStreamOutput = bytesOutputOrNull(); + if (bytesStreamOutput instanceof ReleasableBytesStreamOutput) { + toClose.add((Releasable) bytesStreamOutput); + } + + if (isCloseConnection()) { + toClose.add(nioChannel::close); + } + + nioChannel.getContext().sendMessage(resp, (aVoid, throwable) -> { + Releasables.close(toClose); + }); + success = true; + } finally { + if (success == false) { + Releasables.close(toClose); + } + } + } + + @Override + protected BytesStreamOutput newBytesOutput() { + return new ReleasableBytesStreamOutput(bigArrays); + } + + private void setHeaderField(HttpResponse resp, String headerField, String value) { + setHeaderField(resp, headerField, value, true); + } + + private void setHeaderField(HttpResponse resp, String headerField, String value, boolean override) { + if (override || !resp.headers().contains(headerField)) { + resp.headers().add(headerField, value); + } + } + + private void addCookies(HttpResponse resp) { + if (resetCookies) { + String cookieString = nettyRequest.headers().get(HttpHeaderNames.COOKIE); + if (cookieString != null) { + Set cookies = ServerCookieDecoder.STRICT.decode(cookieString); + if (!cookies.isEmpty()) { + // Reset the cookies if necessary. + resp.headers().set(HttpHeaderNames.SET_COOKIE, ServerCookieEncoder.STRICT.encode(cookies)); + } + } + } + } + + private void addCustomHeaders(HttpResponse response, Map> customHeaders) { + if (customHeaders != null) { + for (Map.Entry> headerEntry : customHeaders.entrySet()) { + for (String headerValue : headerEntry.getValue()) { + setHeaderField(response, headerEntry.getKey(), headerValue); + } + } + } + } + + // Create a new {@link HttpResponse} to transmit the response for the netty request. + private FullHttpResponse newResponse(ByteBuf buffer) { + final boolean http10 = isHttp10(); + final boolean close = isCloseConnection(); + // Build the response object. + final HttpResponseStatus status = HttpResponseStatus.OK; // default to initialize + final FullHttpResponse response; + if (http10) { + response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_0, status, buffer); + if (!close) { + response.headers().add(HttpHeaderNames.CONNECTION, "Keep-Alive"); + } + } else { + response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, status, buffer); + } + return response; + } + + // Determine if the request protocol version is HTTP 1.0 + private boolean isHttp10() { + return nettyRequest.protocolVersion().equals(HttpVersion.HTTP_1_0); + } + + // Determine if the request connection should be closed on completion. 
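+    // Under HTTP/1.1 the connection stays open unless the client sent "Connection: close"; under HTTP/1.0 it is
+    // closed unless the client explicitly sent "Connection: Keep-Alive".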
+ private boolean isCloseConnection() { + final boolean http10 = isHttp10(); + return HttpHeaderValues.CLOSE.contentEqualsIgnoreCase(nettyRequest.headers().get(HttpHeaderNames.CONNECTION)) || + (http10 && !HttpHeaderValues.KEEP_ALIVE.contentEqualsIgnoreCase(nettyRequest.headers().get(HttpHeaderNames.CONNECTION))); + } + + private static Map MAP; + + static { + EnumMap map = new EnumMap<>(RestStatus.class); + map.put(RestStatus.CONTINUE, HttpResponseStatus.CONTINUE); + map.put(RestStatus.SWITCHING_PROTOCOLS, HttpResponseStatus.SWITCHING_PROTOCOLS); + map.put(RestStatus.OK, HttpResponseStatus.OK); + map.put(RestStatus.CREATED, HttpResponseStatus.CREATED); + map.put(RestStatus.ACCEPTED, HttpResponseStatus.ACCEPTED); + map.put(RestStatus.NON_AUTHORITATIVE_INFORMATION, HttpResponseStatus.NON_AUTHORITATIVE_INFORMATION); + map.put(RestStatus.NO_CONTENT, HttpResponseStatus.NO_CONTENT); + map.put(RestStatus.RESET_CONTENT, HttpResponseStatus.RESET_CONTENT); + map.put(RestStatus.PARTIAL_CONTENT, HttpResponseStatus.PARTIAL_CONTENT); + map.put(RestStatus.MULTI_STATUS, HttpResponseStatus.INTERNAL_SERVER_ERROR); // no status for this?? + map.put(RestStatus.MULTIPLE_CHOICES, HttpResponseStatus.MULTIPLE_CHOICES); + map.put(RestStatus.MOVED_PERMANENTLY, HttpResponseStatus.MOVED_PERMANENTLY); + map.put(RestStatus.FOUND, HttpResponseStatus.FOUND); + map.put(RestStatus.SEE_OTHER, HttpResponseStatus.SEE_OTHER); + map.put(RestStatus.NOT_MODIFIED, HttpResponseStatus.NOT_MODIFIED); + map.put(RestStatus.USE_PROXY, HttpResponseStatus.USE_PROXY); + map.put(RestStatus.TEMPORARY_REDIRECT, HttpResponseStatus.TEMPORARY_REDIRECT); + map.put(RestStatus.BAD_REQUEST, HttpResponseStatus.BAD_REQUEST); + map.put(RestStatus.UNAUTHORIZED, HttpResponseStatus.UNAUTHORIZED); + map.put(RestStatus.PAYMENT_REQUIRED, HttpResponseStatus.PAYMENT_REQUIRED); + map.put(RestStatus.FORBIDDEN, HttpResponseStatus.FORBIDDEN); + map.put(RestStatus.NOT_FOUND, HttpResponseStatus.NOT_FOUND); + map.put(RestStatus.METHOD_NOT_ALLOWED, HttpResponseStatus.METHOD_NOT_ALLOWED); + map.put(RestStatus.NOT_ACCEPTABLE, HttpResponseStatus.NOT_ACCEPTABLE); + map.put(RestStatus.PROXY_AUTHENTICATION, HttpResponseStatus.PROXY_AUTHENTICATION_REQUIRED); + map.put(RestStatus.REQUEST_TIMEOUT, HttpResponseStatus.REQUEST_TIMEOUT); + map.put(RestStatus.CONFLICT, HttpResponseStatus.CONFLICT); + map.put(RestStatus.GONE, HttpResponseStatus.GONE); + map.put(RestStatus.LENGTH_REQUIRED, HttpResponseStatus.LENGTH_REQUIRED); + map.put(RestStatus.PRECONDITION_FAILED, HttpResponseStatus.PRECONDITION_FAILED); + map.put(RestStatus.REQUEST_ENTITY_TOO_LARGE, HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE); + map.put(RestStatus.REQUEST_URI_TOO_LONG, HttpResponseStatus.REQUEST_URI_TOO_LONG); + map.put(RestStatus.UNSUPPORTED_MEDIA_TYPE, HttpResponseStatus.UNSUPPORTED_MEDIA_TYPE); + map.put(RestStatus.REQUESTED_RANGE_NOT_SATISFIED, HttpResponseStatus.REQUESTED_RANGE_NOT_SATISFIABLE); + map.put(RestStatus.EXPECTATION_FAILED, HttpResponseStatus.EXPECTATION_FAILED); + map.put(RestStatus.UNPROCESSABLE_ENTITY, HttpResponseStatus.BAD_REQUEST); + map.put(RestStatus.LOCKED, HttpResponseStatus.BAD_REQUEST); + map.put(RestStatus.FAILED_DEPENDENCY, HttpResponseStatus.BAD_REQUEST); + map.put(RestStatus.TOO_MANY_REQUESTS, HttpResponseStatus.TOO_MANY_REQUESTS); + map.put(RestStatus.INTERNAL_SERVER_ERROR, HttpResponseStatus.INTERNAL_SERVER_ERROR); + map.put(RestStatus.NOT_IMPLEMENTED, HttpResponseStatus.NOT_IMPLEMENTED); + map.put(RestStatus.BAD_GATEWAY, HttpResponseStatus.BAD_GATEWAY); + 
map.put(RestStatus.SERVICE_UNAVAILABLE, HttpResponseStatus.SERVICE_UNAVAILABLE); + map.put(RestStatus.GATEWAY_TIMEOUT, HttpResponseStatus.GATEWAY_TIMEOUT); + map.put(RestStatus.HTTP_VERSION_NOT_SUPPORTED, HttpResponseStatus.HTTP_VERSION_NOT_SUPPORTED); + MAP = Collections.unmodifiableMap(map); + } + + private static HttpResponseStatus getStatus(RestStatus status) { + return MAP.getOrDefault(status, HttpResponseStatus.INTERNAL_SERVER_ERROR); + } +} diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpRequest.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpRequest.java new file mode 100644 index 0000000000000..b5bfcc6b0cca2 --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpRequest.java @@ -0,0 +1,186 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.nio; + +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.HttpHeaders; +import io.netty.handler.codec.http.HttpMethod; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.rest.RestRequest; + +import java.util.AbstractMap; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +public class NioHttpRequest extends RestRequest { + + private final FullHttpRequest request; + private final BytesReference content; + + NioHttpRequest(NamedXContentRegistry xContentRegistry, FullHttpRequest request) { + super(xContentRegistry, request.uri(), new HttpHeadersMap(request.headers())); + this.request = request; + if (request.content().isReadable()) { + this.content = ByteBufUtils.toBytesReference(request.content()); + } else { + this.content = BytesArray.EMPTY; + } + + } + + NioHttpRequest(NamedXContentRegistry xContentRegistry, Map params, String uri, FullHttpRequest request) { + super(xContentRegistry, params, uri, new HttpHeadersMap(request.headers())); + this.request = request; + if (request.content().isReadable()) { + this.content = ByteBufUtils.toBytesReference(request.content()); + } else { + this.content = BytesArray.EMPTY; + } + } + + @Override + public Method method() { + HttpMethod httpMethod = request.method(); + if (httpMethod == HttpMethod.GET) + return Method.GET; + + if (httpMethod == HttpMethod.POST) + return Method.POST; + + if (httpMethod == HttpMethod.PUT) + return Method.PUT; + + if (httpMethod == HttpMethod.DELETE) + return Method.DELETE; + + if (httpMethod == HttpMethod.HEAD) { + return Method.HEAD; + } + + if (httpMethod == HttpMethod.OPTIONS) { + return Method.OPTIONS; + } 
+ + return Method.GET; + } + + @Override + public String uri() { + return request.uri(); + } + + @Override + public boolean hasContent() { + return content.length() > 0; + } + + @Override + public BytesReference content() { + return content; + } + + public FullHttpRequest getRequest() { + return request; + } + + /** + * A wrapper of {@link HttpHeaders} that implements a map to prevent copying unnecessarily. This class does not support modifications + * and due to the underlying implementation, it performs case insensitive lookups of key to values. + * + * It is important to note that this implementation does have some downsides in that each invocation of the + * {@link #values()} and {@link #entrySet()} methods will perform a copy of the values in the HttpHeaders rather than returning a + * view of the underlying values. + */ + private static class HttpHeadersMap implements Map> { + + private final HttpHeaders httpHeaders; + + private HttpHeadersMap(HttpHeaders httpHeaders) { + this.httpHeaders = httpHeaders; + } + + @Override + public int size() { + return httpHeaders.size(); + } + + @Override + public boolean isEmpty() { + return httpHeaders.isEmpty(); + } + + @Override + public boolean containsKey(Object key) { + return key instanceof String && httpHeaders.contains((String) key); + } + + @Override + public boolean containsValue(Object value) { + return value instanceof List && httpHeaders.names().stream().map(httpHeaders::getAll).anyMatch(value::equals); + } + + @Override + public List get(Object key) { + return key instanceof String ? httpHeaders.getAll((String) key) : null; + } + + @Override + public List put(String key, List value) { + throw new UnsupportedOperationException("modifications are not supported"); + } + + @Override + public List remove(Object key) { + throw new UnsupportedOperationException("modifications are not supported"); + } + + @Override + public void putAll(Map> m) { + throw new UnsupportedOperationException("modifications are not supported"); + } + + @Override + public void clear() { + throw new UnsupportedOperationException("modifications are not supported"); + } + + @Override + public Set keySet() { + return httpHeaders.names(); + } + + @Override + public Collection> values() { + return httpHeaders.names().stream().map(k -> Collections.unmodifiableList(httpHeaders.getAll(k))).collect(Collectors.toList()); + } + + @Override + public Set>> entrySet() { + return httpHeaders.names().stream().map(k -> new AbstractMap.SimpleImmutableEntry<>(k, httpHeaders.getAll(k))) + .collect(Collectors.toSet()); + } + } +} diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java new file mode 100644 index 0000000000000..bdbee715bd0cf --- /dev/null +++ b/plugins/transport-nio/src/main/java/org/elasticsearch/http/nio/NioHttpServerTransport.java @@ -0,0 +1,322 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.nio; + +import io.netty.handler.timeout.ReadTimeoutException; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.logging.log4j.util.Supplier; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.network.NetworkAddress; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.NetworkExceptionHelper; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.http.BindHttpException; +import org.elasticsearch.http.HttpHandlingSettings; +import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.http.HttpStats; +import org.elasticsearch.http.netty4.AbstractHttpServerTransport; +import org.elasticsearch.nio.AcceptingSelector; +import org.elasticsearch.nio.AcceptorEventHandler; +import org.elasticsearch.nio.BytesChannelContext; +import org.elasticsearch.nio.ChannelFactory; +import org.elasticsearch.nio.InboundChannelBuffer; +import org.elasticsearch.nio.NioChannel; +import org.elasticsearch.nio.NioGroup; +import org.elasticsearch.nio.NioServerSocketChannel; +import org.elasticsearch.nio.NioSocketChannel; +import org.elasticsearch.nio.ServerChannelContext; +import org.elasticsearch.nio.SocketChannelContext; +import org.elasticsearch.nio.SocketEventHandler; +import org.elasticsearch.nio.SocketSelector; +import org.elasticsearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.nio.channels.ServerSocketChannel; +import java.nio.channels.SocketChannel; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; + +import static org.elasticsearch.common.settings.Setting.intSetting; +import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH; 
+import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_RESET_COOKIES; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_KEEP_ALIVE; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_NO_DELAY; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_REUSE_ADDRESS; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_SEND_BUFFER_SIZE; + +public class NioHttpServerTransport extends AbstractHttpServerTransport { + + public static final Setting NIO_HTTP_ACCEPTOR_COUNT = + intSetting("http.nio.acceptor_count", 1, 1, Setting.Property.NodeScope); + public static final Setting NIO_HTTP_WORKER_COUNT = + new Setting<>("http.nio.worker_count", + (s) -> Integer.toString(EsExecutors.numberOfProcessors(s) * 2), + (s) -> Setting.parseInt(s, 1, "http.nio.worker_count"), Setting.Property.NodeScope); + + private static final String TRANSPORT_WORKER_THREAD_NAME_PREFIX = "http_nio_transport_worker"; + private static final String TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX = "http_nio_transport_acceptor"; + + private final BigArrays bigArrays; + private final ThreadPool threadPool; + private final NamedXContentRegistry xContentRegistry; + + private final HttpHandlingSettings httpHandlingSettings; + + private final boolean tcpNoDelay; + private final boolean tcpKeepAlive; + private final boolean reuseAddress; + private final int tcpSendBufferSize; + private final int tcpReceiveBufferSize; + + private final Set serverChannels = Collections.newSetFromMap(new ConcurrentHashMap<>()); + private final Set socketChannels = Collections.newSetFromMap(new ConcurrentHashMap<>()); + private NioGroup nioGroup; + private HttpChannelFactory channelFactory; + + public NioHttpServerTransport(Settings settings, NetworkService networkService, BigArrays bigArrays, ThreadPool threadPool, + NamedXContentRegistry xContentRegistry, HttpServerTransport.Dispatcher dispatcher) { + super(settings, networkService, threadPool, dispatcher); + this.bigArrays = bigArrays; + this.threadPool = threadPool; + this.xContentRegistry = xContentRegistry; + + ByteSizeValue maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings); + ByteSizeValue maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings); + ByteSizeValue maxInitialLineLength = SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings); + this.httpHandlingSettings = new HttpHandlingSettings(Math.toIntExact(maxContentLength.getBytes()), + Math.toIntExact(maxChunkSize.getBytes()), + Math.toIntExact(maxHeaderSize.getBytes()), + Math.toIntExact(maxInitialLineLength.getBytes()), + SETTING_HTTP_RESET_COOKIES.get(settings), + SETTING_HTTP_COMPRESSION.get(settings), + SETTING_HTTP_COMPRESSION_LEVEL.get(settings), + SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings)); + + this.tcpNoDelay = SETTING_HTTP_TCP_NO_DELAY.get(settings); + this.tcpKeepAlive = SETTING_HTTP_TCP_KEEP_ALIVE.get(settings); + this.reuseAddress = SETTING_HTTP_TCP_REUSE_ADDRESS.get(settings); + this.tcpSendBufferSize = Math.toIntExact(SETTING_HTTP_TCP_SEND_BUFFER_SIZE.get(settings).getBytes()); + this.tcpReceiveBufferSize = Math.toIntExact(SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE.get(settings).getBytes()); + + + logger.debug("using max_chunk_size[{}], max_header_size[{}], max_initial_line_length[{}], max_content_length[{}]", + maxChunkSize, maxHeaderSize, maxInitialLineLength, maxContentLength); + } + + BigArrays getBigArrays() { + 
return bigArrays;
+    }
+
+    @Override
+    protected void doStart() {
+        boolean success = false;
+        try {
+            int acceptorCount = NIO_HTTP_ACCEPTOR_COUNT.get(settings);
+            int workerCount = NIO_HTTP_WORKER_COUNT.get(settings);
+            nioGroup = new NioGroup(logger, daemonThreadFactory(this.settings, TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX), acceptorCount,
+                AcceptorEventHandler::new, daemonThreadFactory(this.settings, TRANSPORT_WORKER_THREAD_NAME_PREFIX),
+                workerCount, SocketEventHandler::new);
+            channelFactory = new HttpChannelFactory();
+            this.boundAddress = createBoundHttpAddress();
+
+            if (logger.isInfoEnabled()) {
+                logger.info("{}", boundAddress);
+            }
+
+            success = true;
+        } catch (IOException e) {
+            throw new ElasticsearchException(e);
+        } finally {
+            if (success == false) {
+                doStop(); // otherwise we leak threads since we never moved to started
+            }
+        }
+    }
+
+    @Override
+    protected void doStop() {
+        synchronized (serverChannels) {
+            if (serverChannels.isEmpty() == false) {
+                try {
+                    closeChannels(new ArrayList<>(serverChannels));
+                } catch (Exception e) {
+                    logger.error("unexpected exception while closing http server channels", e);
+                }
+                serverChannels.clear();
+            }
+        }
+
+        try {
+            closeChannels(new ArrayList<>(socketChannels));
+        } catch (Exception e) {
+            logger.warn("unexpected exception while closing http channels", e);
+        }
+        socketChannels.clear();
+
+        try {
+            nioGroup.close();
+        } catch (Exception e) {
+            logger.warn("unexpected exception while stopping nio group", e);
+        }
+    }
+
+    @Override
+    protected void doClose() throws IOException {
+    }
+
+    @Override
+    protected TransportAddress bindAddress(InetAddress hostAddress) {
+        final AtomicReference<Exception> lastException = new AtomicReference<>();
+        final AtomicReference<InetSocketAddress> boundSocket = new AtomicReference<>();
+        boolean success = port.iterate(portNumber -> {
+            try {
+                synchronized (serverChannels) {
+                    InetSocketAddress address = new InetSocketAddress(hostAddress, portNumber);
+                    NioServerSocketChannel channel = nioGroup.bindServerChannel(address, channelFactory);
+                    serverChannels.add(channel);
+                    boundSocket.set(channel.getLocalAddress());
+                }
+            } catch (Exception e) {
+                lastException.set(e);
+                return false;
+            }
+            return true;
+        });
+        if (success == false) {
+            throw new BindHttpException("Failed to bind to [" + port.getPortRangeString() + "]", lastException.get());
+        }
+
+        if (logger.isDebugEnabled()) {
+            logger.debug("Bound http to address {{}}", NetworkAddress.format(boundSocket.get()));
+        }
+        return new TransportAddress(boundSocket.get());
+    }
+
+    @Override
+    public HttpStats stats() {
+        return new HttpStats(serverChannels.size(), socketChannels.size());
+    }
+
+    protected void exceptionCaught(NioSocketChannel channel, Exception cause) {
+        if (cause instanceof ReadTimeoutException) {
+            if (logger.isTraceEnabled()) {
+                logger.trace("Read timeout [{}]", channel.getRemoteAddress());
+            }
+            channel.close();
+        } else {
+            if (lifecycle.started() == false) {
+                // ignore
+                return;
+            }
+            if (NetworkExceptionHelper.isCloseConnectionException(cause) == false) {
+                logger.warn(
+                    (Supplier<?>) () -> new ParameterizedMessage(
+                        "caught exception while handling client http traffic, closing connection {}", channel),
+                    cause);
+                channel.close();
+            } else {
+                logger.debug(
+                    (Supplier<?>) () -> new ParameterizedMessage(
+                        "caught exception while handling client http traffic, closing connection {}", channel),
+                    cause);
+                channel.close();
+            }
+        }
+    }
+
+    private void closeChannels(List<NioChannel> channels) {
+        List<ActionFuture<Void>> futures = new ArrayList<>(channels.size());
+
+        for (NioChannel channel : channels) {
+            PlainActionFuture<Void> future = PlainActionFuture.newFuture();
+            channel.addCloseListener(ActionListener.toBiConsumer(future));
+            futures.add(future);
+            channel.close();
+        }
+
+        List<RuntimeException> closeExceptions = new ArrayList<>();
+        for (ActionFuture<Void> f : futures) {
+            try {
+                f.actionGet();
+            } catch (RuntimeException e) {
+                closeExceptions.add(e);
+            }
+        }
+
+        ExceptionsHelper.rethrowAndSuppress(closeExceptions);
+    }
+
+    private void acceptChannel(NioSocketChannel socketChannel) {
+        socketChannels.add(socketChannel);
+    }
+
+    private class HttpChannelFactory extends ChannelFactory<NioServerSocketChannel, NioSocketChannel> {
+
+        private HttpChannelFactory() {
+            super(new RawChannelFactory(tcpNoDelay, tcpKeepAlive, reuseAddress, tcpSendBufferSize, tcpReceiveBufferSize));
+        }
+
+        @Override
+        public NioSocketChannel createChannel(SocketSelector selector, SocketChannel channel) throws IOException {
+            NioSocketChannel nioChannel = new NioSocketChannel(channel);
+            HttpReadWriteHandler httpReadWritePipeline = new HttpReadWriteHandler(nioChannel, NioHttpServerTransport.this,
+                httpHandlingSettings, xContentRegistry, threadPool.getThreadContext());
+            Consumer<Exception> exceptionHandler = (e) -> exceptionCaught(nioChannel, e);
+            SocketChannelContext context = new BytesChannelContext(nioChannel, selector, exceptionHandler, httpReadWritePipeline,
+                InboundChannelBuffer.allocatingInstance());
+            nioChannel.setContext(context);
+            return nioChannel;
+        }
+
+        @Override
+        public NioServerSocketChannel createServerChannel(AcceptingSelector selector, ServerSocketChannel channel) throws IOException {
+            NioServerSocketChannel nioChannel = new NioServerSocketChannel(channel);
+            ServerChannelContext context = new ServerChannelContext(nioChannel, this, selector, NioHttpServerTransport.this::acceptChannel,
+                (e) -> {});
+            nioChannel.setContext(context);
+            return nioChannel;
+        }
+
+    }
+}
diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java
index eb3d7f3d710dc..9d794f951c8d2 100644
--- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java
+++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransport.java
@@ -21,7 +21,6 @@
 
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.network.NetworkService;
 import org.elasticsearch.common.recycler.Recycler;
@@ -39,7 +38,6 @@
 import org.elasticsearch.nio.NioGroup;
 import org.elasticsearch.nio.NioSocketChannel;
 import org.elasticsearch.nio.ServerChannelContext;
-import org.elasticsearch.nio.SocketChannelContext;
 import org.elasticsearch.nio.SocketEventHandler;
 import org.elasticsearch.nio.SocketSelector;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -184,10 +182,9 @@ public TcpNioSocketChannel createChannel(SocketSelector selector, SocketChannel
                 Recycler.V<byte[]> bytes = pageCacheRecycler.bytePage(false);
                 return new InboundChannelBuffer.Page(ByteBuffer.wrap(bytes.v()), bytes::close);
             };
-            SocketChannelContext.ReadConsumer nioReadConsumer = channelBuffer ->
-                consumeNetworkReads(nioChannel, BytesReference.fromByteBuffers(channelBuffer.sliceBuffersTo(channelBuffer.getIndex())));
+            TcpReadWriteHandler readWriteHandler = new TcpReadWriteHandler(nioChannel, NioTransport.this);
             Consumer<Exception> exceptionHandler = (e) -> exceptionCaught(nioChannel, e);
-            BytesChannelContext context = new BytesChannelContext(nioChannel, selector, exceptionHandler, nioReadConsumer,
+            BytesChannelContext context = new BytesChannelContext(nioChannel, selector, exceptionHandler, readWriteHandler,
                 new InboundChannelBuffer(pageSupplier));
             nioChannel.setContext(context);
             return nioChannel;
diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java
index 029507a5ba49d..422e3e9b83330 100644
--- a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java
+++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/NioTransportPlugin.java
@@ -19,14 +19,15 @@
 
 package org.elasticsearch.transport.nio;
 
-import org.elasticsearch.bootstrap.BootstrapCheck;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
-import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.network.NetworkService;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.common.util.PageCacheRecycler;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.http.HttpServerTransport;
+import org.elasticsearch.http.nio.NioHttpServerTransport;
 import org.elasticsearch.indices.breaker.CircuitBreakerService;
 import org.elasticsearch.plugins.NetworkPlugin;
 import org.elasticsearch.plugins.Plugin;
@@ -42,10 +43,13 @@ public class NioTransportPlugin extends Plugin implements NetworkPlugin {
 
     public static final String NIO_TRANSPORT_NAME = "nio-transport";
+    public static final String NIO_HTTP_TRANSPORT_NAME = "nio-http-transport";
 
     @Override
     public List<Setting<?>> getSettings() {
         return Arrays.asList(
+            NioHttpServerTransport.NIO_HTTP_ACCEPTOR_COUNT,
+            NioHttpServerTransport.NIO_HTTP_WORKER_COUNT,
             NioTransport.NIO_WORKER_COUNT,
             NioTransport.NIO_ACCEPTOR_COUNT
         );
@@ -61,4 +65,15 @@ public Map<String, Supplier<Transport>> getTransports(Settings settings, ThreadP
             () -> new NioTransport(settings, threadPool, networkService, bigArrays, pageCacheRecycler, namedWriteableRegistry,
                 circuitBreakerService));
     }
+
+    @Override
+    public Map<String, Supplier<HttpServerTransport>> getHttpTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
+                                                                        CircuitBreakerService circuitBreakerService,
+                                                                        NamedWriteableRegistry namedWriteableRegistry,
+                                                                        NamedXContentRegistry xContentRegistry,
+                                                                        NetworkService networkService,
+                                                                        HttpServerTransport.Dispatcher dispatcher) {
+        return Collections.singletonMap(NIO_HTTP_TRANSPORT_NAME,
+            () -> new NioHttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry, dispatcher));
+    }
 }
diff --git a/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpReadWriteHandler.java b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpReadWriteHandler.java
new file mode 100644
index 0000000000000..f2d07b180855c
--- /dev/null
+++ b/plugins/transport-nio/src/main/java/org/elasticsearch/transport/nio/TcpReadWriteHandler.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.transport.nio;
+
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.nio.BytesWriteHandler;
+import org.elasticsearch.nio.InboundChannelBuffer;
+import org.elasticsearch.transport.TcpTransport;
+
+import java.io.IOException;
+
+public class TcpReadWriteHandler extends BytesWriteHandler {
+
+    private final TcpNioSocketChannel channel;
+    private final TcpTransport transport;
+
+    public TcpReadWriteHandler(TcpNioSocketChannel channel, TcpTransport transport) {
+        this.channel = channel;
+        this.transport = transport;
+    }
+
+    @Override
+    public int consumeReads(InboundChannelBuffer channelBuffer) throws IOException {
+        BytesReference bytesReference = BytesReference.fromByteBuffers(channelBuffer.sliceBuffersTo(channelBuffer.getIndex()));
+        return transport.consumeNetworkReads(channel, bytesReference);
+    }
+}
diff --git a/plugins/transport-nio/src/main/plugin-metadata/plugin-security.policy b/plugins/transport-nio/src/main/plugin-metadata/plugin-security.policy
index 2dbe07bd8a5c6..8c8fe7c327412 100644
--- a/plugins/transport-nio/src/main/plugin-metadata/plugin-security.policy
+++ b/plugins/transport-nio/src/main/plugin-metadata/plugin-security.policy
@@ -21,3 +21,9 @@ grant codeBase "${codebase.elasticsearch-nio}" {
   // elasticsearch-nio makes and accepts socket connections
   permission java.net.SocketPermission "*", "accept,connect";
 };
+
+grant codeBase "${codebase.netty-common}" {
+  // This should only currently be required as we use the netty http client for tests
+  // netty makes and accepts socket connections
+  permission java.net.SocketPermission "*", "accept,connect";
+};
diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java
new file mode 100644
index 0000000000000..dce8319d2fc82
--- /dev/null
+++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/HttpReadWriteHandlerTests.java
@@ -0,0 +1,241 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.http.nio;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
+import io.netty.channel.embedded.EmbeddedChannel;
+import io.netty.handler.codec.http.DefaultFullHttpRequest;
+import io.netty.handler.codec.http.DefaultFullHttpResponse;
+import io.netty.handler.codec.http.FullHttpRequest;
+import io.netty.handler.codec.http.FullHttpResponse;
+import io.netty.handler.codec.http.HttpMethod;
+import io.netty.handler.codec.http.HttpRequest;
+import io.netty.handler.codec.http.HttpRequestEncoder;
+import io.netty.handler.codec.http.HttpResponse;
+import io.netty.handler.codec.http.HttpResponseDecoder;
+import io.netty.handler.codec.http.HttpResponseStatus;
+import io.netty.handler.codec.http.HttpUtil;
+import io.netty.handler.codec.http.HttpVersion;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.util.concurrent.ThreadContext;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.http.HttpHandlingSettings;
+import org.elasticsearch.nio.FlushOperation;
+import org.elasticsearch.nio.InboundChannelBuffer;
+import org.elasticsearch.nio.NioSocketChannel;
+import org.elasticsearch.nio.SocketChannelContext;
+import org.elasticsearch.rest.RestChannel;
+import org.elasticsearch.rest.RestRequest;
+import org.elasticsearch.test.ESTestCase;
+import org.junit.Before;
+import org.mockito.ArgumentCaptor;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.List;
+import java.util.function.BiConsumer;
+
+import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION;
+import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL;
+import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_DETAILED_ERRORS_ENABLED;
+import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE;
+import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE;
+import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH;
+import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_RESET_COOKIES;
+import static org.mockito.Matchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.verifyZeroInteractions;
+
+public class HttpReadWriteHandlerTests extends ESTestCase {
+
+    private HttpReadWriteHandler handler;
+    private NioSocketChannel nioSocketChannel;
+    private NioHttpServerTransport transport;
+
+    private final RequestEncoder requestEncoder = new RequestEncoder();
+    private final ResponseDecoder responseDecoder = new ResponseDecoder();
+
+    @Before
+    @SuppressWarnings("unchecked")
+    public void setMocks() {
+        transport = mock(NioHttpServerTransport.class);
+        Settings settings = Settings.EMPTY;
+        ByteSizeValue maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.getDefault(settings);
+        ByteSizeValue maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.getDefault(settings);
+        ByteSizeValue maxInitialLineLength = SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.getDefault(settings);
+        HttpHandlingSettings httpHandlingSettings = new HttpHandlingSettings(1024,
+            Math.toIntExact(maxChunkSize.getBytes()),
+            Math.toIntExact(maxHeaderSize.getBytes()),
+            Math.toIntExact(maxInitialLineLength.getBytes()),
+            SETTING_HTTP_RESET_COOKIES.getDefault(settings),
+            SETTING_HTTP_COMPRESSION.getDefault(settings),
+            SETTING_HTTP_COMPRESSION_LEVEL.getDefault(settings),
+            SETTING_HTTP_DETAILED_ERRORS_ENABLED.getDefault(settings));
+        ThreadContext threadContext = new ThreadContext(settings);
+        nioSocketChannel = mock(NioSocketChannel.class);
+        handler = new HttpReadWriteHandler(nioSocketChannel, transport, httpHandlingSettings, NamedXContentRegistry.EMPTY, threadContext);
+    }
+
+    public void testSuccessfulDecodeHttpRequest() throws IOException {
+        String uri = "localhost:9090/" + randomAlphaOfLength(8);
+        HttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, uri);
+
+        ByteBuf buf = requestEncoder.encode(httpRequest);
+        int slicePoint = randomInt(buf.writerIndex() - 1);
+
+        ByteBuf slicedBuf = buf.retainedSlice(0, slicePoint);
+        ByteBuf slicedBuf2 = buf.retainedSlice(slicePoint, buf.writerIndex());
+        handler.consumeReads(toChannelBuffer(slicedBuf));
+
+        verify(transport, times(0)).dispatchRequest(any(RestRequest.class), any(RestChannel.class));
+
+        handler.consumeReads(toChannelBuffer(slicedBuf2));
+
+        ArgumentCaptor<RestRequest> requestCaptor = ArgumentCaptor.forClass(RestRequest.class);
+        verify(transport).dispatchRequest(requestCaptor.capture(), any(RestChannel.class));
+
+        NioHttpRequest nioHttpRequest = (NioHttpRequest) requestCaptor.getValue();
+        FullHttpRequest nettyHttpRequest = nioHttpRequest.getRequest();
+        assertEquals(httpRequest.protocolVersion(), nettyHttpRequest.protocolVersion());
+        assertEquals(httpRequest.method(), nettyHttpRequest.method());
+    }
+
+    public void testDecodeHttpRequestError() throws IOException {
+        String uri = "localhost:9090/" + randomAlphaOfLength(8);
+        HttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, uri);
+
+        ByteBuf buf = requestEncoder.encode(httpRequest);
+        buf.setByte(0, ' ');
+        buf.setByte(1, ' ');
+        buf.setByte(2, ' ');
+
+        handler.consumeReads(toChannelBuffer(buf));
+
+        ArgumentCaptor<Throwable> exceptionCaptor = ArgumentCaptor.forClass(Throwable.class);
+        verify(transport).dispatchBadRequest(any(RestRequest.class), any(RestChannel.class), exceptionCaptor.capture());
+
+        assertTrue(exceptionCaptor.getValue() instanceof IllegalArgumentException);
+    }
+
+    public void testDecodeHttpRequestContentLengthToLongGeneratesOutboundMessage() throws IOException {
+        String uri = "localhost:9090/" + randomAlphaOfLength(8);
+        HttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, uri, false);
+        HttpUtil.setContentLength(httpRequest, 1025);
+        HttpUtil.setKeepAlive(httpRequest, false);
+
+        ByteBuf buf = requestEncoder.encode(httpRequest);
+
+        handler.consumeReads(toChannelBuffer(buf));
+
+        verifyZeroInteractions(transport);
+
+        List<FlushOperation> flushOperations = handler.pollFlushOperations();
+        assertFalse(flushOperations.isEmpty());
+
+        FlushOperation flushOperation = flushOperations.get(0);
+        HttpResponse response = responseDecoder.decode(Unpooled.wrappedBuffer(flushOperation.getBuffersToWrite()));
+        assertEquals(HttpVersion.HTTP_1_1, response.protocolVersion());
+        assertEquals(HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, response.status());
+
+        flushOperation.getListener().accept(null, null);
+        // Since we have keep-alive set to false, we should close the channel after the response has been
+        // flushed
+        verify(nioSocketChannel).close();
+    }
+
+    @SuppressWarnings("unchecked")
+    public void testEncodeHttpResponse() throws IOException {
+        prepareHandlerForResponse(handler);
+
+        FullHttpResponse fullHttpResponse = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
+
+
SocketChannelContext context = mock(SocketChannelContext.class); + HttpWriteOperation writeOperation = new HttpWriteOperation(context, fullHttpResponse, mock(BiConsumer.class)); + List flushOperations = handler.writeToBytes(writeOperation); + + HttpResponse response = responseDecoder.decode(Unpooled.wrappedBuffer(flushOperations.get(0).getBuffersToWrite())); + + assertEquals(HttpResponseStatus.OK, response.status()); + assertEquals(HttpVersion.HTTP_1_1, response.protocolVersion()); + } + + private FullHttpRequest prepareHandlerForResponse(HttpReadWriteHandler adaptor) throws IOException { + HttpMethod method = HttpMethod.GET; + HttpVersion version = HttpVersion.HTTP_1_1; + String uri = "http://localhost:9090/" + randomAlphaOfLength(8); + + HttpRequest request = new DefaultFullHttpRequest(version, method, uri); + ByteBuf buf = requestEncoder.encode(request); + + handler.consumeReads(toChannelBuffer(buf)); + + ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(RestRequest.class); + verify(transport).dispatchRequest(requestCaptor.capture(), any(RestChannel.class)); + + NioHttpRequest nioHttpRequest = (NioHttpRequest) requestCaptor.getValue(); + FullHttpRequest requestParsed = nioHttpRequest.getRequest(); + assertNotNull(requestParsed); + assertEquals(requestParsed.method(), method); + assertEquals(requestParsed.protocolVersion(), version); + assertEquals(requestParsed.uri(), uri); + return requestParsed; + } + + private InboundChannelBuffer toChannelBuffer(ByteBuf buf) { + InboundChannelBuffer buffer = InboundChannelBuffer.allocatingInstance(); + int readableBytes = buf.readableBytes(); + buffer.ensureCapacity(readableBytes); + int bytesWritten = 0; + ByteBuffer[] byteBuffers = buffer.sliceBuffersTo(readableBytes); + int i = 0; + while (bytesWritten != readableBytes) { + ByteBuffer byteBuffer = byteBuffers[i++]; + int initialRemaining = byteBuffer.remaining(); + buf.readBytes(byteBuffer); + bytesWritten += initialRemaining - byteBuffer.remaining(); + } + buffer.incrementIndex(bytesWritten); + return buffer; + } + + private static class RequestEncoder { + + private final EmbeddedChannel requestEncoder = new EmbeddedChannel(new HttpRequestEncoder()); + + private ByteBuf encode(HttpRequest httpRequest) { + requestEncoder.writeOutbound(httpRequest); + return requestEncoder.readOutbound(); + } + } + + private static class ResponseDecoder { + + private final EmbeddedChannel responseDecoder = new EmbeddedChannel(new HttpResponseDecoder()); + + private HttpResponse decode(ByteBuf response) { + responseDecoder.writeInbound(response); + return responseDecoder.readInbound(); + } + } +} diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/Netty4HttpClient.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/Netty4HttpClient.java new file mode 100644 index 0000000000000..32f294f47ce9c --- /dev/null +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/Netty4HttpClient.java @@ -0,0 +1,200 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.nio; + +import io.netty.bootstrap.Bootstrap; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.SimpleChannelInboundHandler; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.SocketChannel; +import io.netty.channel.socket.nio.NioSocketChannel; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpObject; +import io.netty.handler.codec.http.HttpObjectAggregator; +import io.netty.handler.codec.http.HttpRequest; +import io.netty.handler.codec.http.HttpRequestEncoder; +import io.netty.handler.codec.http.HttpResponse; +import io.netty.handler.codec.http.HttpResponseDecoder; +import io.netty.handler.codec.http.HttpVersion; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; + +import java.io.Closeable; +import java.net.SocketAddress; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static io.netty.handler.codec.http.HttpHeaderNames.HOST; +import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; + +/** + * Tiny helper to send http requests over netty. + */ +class Netty4HttpClient implements Closeable { + + static Collection returnHttpResponseBodies(Collection responses) { + List list = new ArrayList<>(responses.size()); + for (FullHttpResponse response : responses) { + list.add(response.content().toString(StandardCharsets.UTF_8)); + } + return list; + } + + static Collection returnOpaqueIds(Collection responses) { + List list = new ArrayList<>(responses.size()); + for (HttpResponse response : responses) { + list.add(response.headers().get("X-Opaque-Id")); + } + return list; + } + + private final Bootstrap clientBootstrap; + + Netty4HttpClient() { + clientBootstrap = new Bootstrap().channel(NioSocketChannel.class).group(new NioEventLoopGroup()); + } + + public Collection get(SocketAddress remoteAddress, String... uris) throws InterruptedException { + Collection requests = new ArrayList<>(uris.length); + for (int i = 0; i < uris.length; i++) { + final HttpRequest httpRequest = new DefaultFullHttpRequest(HTTP_1_1, HttpMethod.GET, uris[i]); + httpRequest.headers().add(HOST, "localhost"); + httpRequest.headers().add("X-Opaque-ID", String.valueOf(i)); + requests.add(httpRequest); + } + return sendRequests(remoteAddress, requests); + } + + @SafeVarargs // Safe not because it doesn't do anything with the type parameters but because it won't leak them into other methods. 
+ public final Collection post(SocketAddress remoteAddress, Tuple... urisAndBodies) + throws InterruptedException { + return processRequestsWithBody(HttpMethod.POST, remoteAddress, urisAndBodies); + } + + public final FullHttpResponse post(SocketAddress remoteAddress, FullHttpRequest httpRequest) throws InterruptedException { + Collection responses = sendRequests(remoteAddress, Collections.singleton(httpRequest)); + assert responses.size() == 1 : "expected 1 and only 1 http response"; + return responses.iterator().next(); + } + + @SafeVarargs // Safe not because it doesn't do anything with the type parameters but because it won't leak them into other methods. + public final Collection put(SocketAddress remoteAddress, Tuple... urisAndBodies) + throws InterruptedException { + return processRequestsWithBody(HttpMethod.PUT, remoteAddress, urisAndBodies); + } + + private Collection processRequestsWithBody(HttpMethod method, SocketAddress remoteAddress, Tuple... urisAndBodies) throws InterruptedException { + Collection requests = new ArrayList<>(urisAndBodies.length); + for (Tuple uriAndBody : urisAndBodies) { + ByteBuf content = Unpooled.copiedBuffer(uriAndBody.v2(), StandardCharsets.UTF_8); + HttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, method, uriAndBody.v1(), content); + request.headers().add(HttpHeaderNames.HOST, "localhost"); + request.headers().add(HttpHeaderNames.CONTENT_LENGTH, content.readableBytes()); + request.headers().add(HttpHeaderNames.CONTENT_TYPE, "application/json"); + requests.add(request); + } + return sendRequests(remoteAddress, requests); + } + + private synchronized Collection sendRequests( + final SocketAddress remoteAddress, + final Collection requests) throws InterruptedException { + final CountDownLatch latch = new CountDownLatch(requests.size()); + final Collection content = Collections.synchronizedList(new ArrayList<>(requests.size())); + + clientBootstrap.handler(new CountDownLatchHandler(latch, content)); + + ChannelFuture channelFuture = null; + try { + channelFuture = clientBootstrap.connect(remoteAddress); + channelFuture.sync(); + + for (HttpRequest request : requests) { + channelFuture.channel().writeAndFlush(request); + } + latch.await(30, TimeUnit.SECONDS); + + } finally { + if (channelFuture != null) { + channelFuture.channel().close().sync(); + } + } + + return content; + } + + @Override + public void close() { + clientBootstrap.config().group().shutdownGracefully().awaitUninterruptibly(); + } + + /** + * helper factory which adds returned data to a list and uses a count down latch to decide when done + */ + private static class CountDownLatchHandler extends ChannelInitializer { + + private final CountDownLatch latch; + private final Collection content; + + CountDownLatchHandler(final CountDownLatch latch, final Collection content) { + this.latch = latch; + this.content = content; + } + + @Override + protected void initChannel(SocketChannel ch) throws Exception { + final int maxContentLength = new ByteSizeValue(100, ByteSizeUnit.MB).bytesAsInt(); + ch.pipeline().addLast(new HttpResponseDecoder()); + ch.pipeline().addLast(new HttpRequestEncoder()); + ch.pipeline().addLast(new HttpObjectAggregator(maxContentLength)); + ch.pipeline().addLast(new SimpleChannelInboundHandler() { + @Override + protected void channelRead0(ChannelHandlerContext ctx, HttpObject msg) throws Exception { + final FullHttpResponse response = (FullHttpResponse) msg; + content.add(response.copy()); + latch.countDown(); + } + + @Override + public void 
exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { + super.exceptionCaught(ctx, cause); + latch.countDown(); + } + }); + } + + } + +} diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NettyAdaptorTests.java b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NettyAdaptorTests.java new file mode 100644 index 0000000000000..d6944a5f510e2 --- /dev/null +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NettyAdaptorTests.java @@ -0,0 +1,177 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.nio; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; +import io.netty.channel.ChannelOutboundHandlerAdapter; +import io.netty.channel.ChannelPromise; +import io.netty.channel.SimpleChannelInboundHandler; +import org.elasticsearch.nio.FlushOperation; +import org.elasticsearch.test.ESTestCase; + +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.Locale; +import java.util.concurrent.atomic.AtomicBoolean; + +public class NettyAdaptorTests extends ESTestCase { + + public void testBasicRead() { + TenIntsToStringsHandler handler = new TenIntsToStringsHandler(); + NettyAdaptor nettyAdaptor = new NettyAdaptor(handler); + ByteBuffer message = ByteBuffer.allocate(40); + for (int i = 0; i < 10; ++i) { + message.putInt(i); + } + message.flip(); + ByteBuffer[] buffers = {message}; + assertEquals(40, nettyAdaptor.read(buffers)); + assertEquals("0123456789", handler.result); + } + + public void testBasicReadWithExcessData() { + TenIntsToStringsHandler handler = new TenIntsToStringsHandler(); + NettyAdaptor nettyAdaptor = new NettyAdaptor(handler); + ByteBuffer message = ByteBuffer.allocate(52); + for (int i = 0; i < 13; ++i) { + message.putInt(i); + } + message.flip(); + ByteBuffer[] buffers = {message}; + assertEquals(40, nettyAdaptor.read(buffers)); + assertEquals("0123456789", handler.result); + } + + public void testUncaughtReadExceptionsBubbleUp() { + NettyAdaptor nettyAdaptor = new NettyAdaptor(new TenIntsToStringsHandler()); + ByteBuffer message = ByteBuffer.allocate(40); + for (int i = 0; i < 9; ++i) { + message.putInt(i); + } + message.flip(); + ByteBuffer[] buffers = {message}; + expectThrows(IllegalStateException.class, () -> nettyAdaptor.read(buffers)); + } + + public void testWriteInsidePipelineIsCaptured() { + TenIntsToStringsHandler tenIntsToStringsHandler = new TenIntsToStringsHandler(); + PromiseCheckerHandler promiseCheckerHandler = new PromiseCheckerHandler(); + NettyAdaptor nettyAdaptor = new NettyAdaptor(new CapitalizeWriteHandler(), + promiseCheckerHandler, + new 
WriteInMiddleHandler(), + tenIntsToStringsHandler); + byte[] bytes = "SHOULD_WRITE".getBytes(StandardCharsets.UTF_8); + ByteBuffer message = ByteBuffer.wrap(bytes); + ByteBuffer[] buffers = {message}; + assertNull(nettyAdaptor.pollOutboundOperation()); + nettyAdaptor.read(buffers); + assertFalse(tenIntsToStringsHandler.wasCalled); + FlushOperation flushOperation = nettyAdaptor.pollOutboundOperation(); + assertNotNull(flushOperation); + assertEquals("FAILED", Unpooled.wrappedBuffer(flushOperation.getBuffersToWrite()).toString(StandardCharsets.UTF_8)); + assertFalse(promiseCheckerHandler.isCalled.get()); + flushOperation.getListener().accept(null, null); + assertTrue(promiseCheckerHandler.isCalled.get()); + } + + public void testCloseListener() { + AtomicBoolean listenerCalled = new AtomicBoolean(false); + CloseChannelHandler handler = new CloseChannelHandler(); + NettyAdaptor nettyAdaptor = new NettyAdaptor(handler); + byte[] bytes = "SHOULD_CLOSE".getBytes(StandardCharsets.UTF_8); + ByteBuffer[] buffers = {ByteBuffer.wrap(bytes)}; + nettyAdaptor.addCloseListener((v, e) -> listenerCalled.set(true)); + assertFalse(listenerCalled.get()); + nettyAdaptor.read(buffers); + assertTrue(listenerCalled.get()); + + } + + private class TenIntsToStringsHandler extends SimpleChannelInboundHandler { + + private String result; + boolean wasCalled = false; + + @Override + protected void channelRead0(ChannelHandlerContext ctx, ByteBuf msg) throws Exception { + wasCalled = true; + if (msg.readableBytes() < 10 * 4) { + throw new IllegalStateException("Must have ten ints"); + } + StringBuilder builder = new StringBuilder(); + for (int i = 0; i < 10; ++i) { + builder.append(msg.readInt()); + } + result = builder.toString(); + } + } + + private class WriteInMiddleHandler extends ChannelInboundHandlerAdapter { + + @Override + public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { + ByteBuf buffer = (ByteBuf) msg; + String bufferString = buffer.toString(StandardCharsets.UTF_8); + if (bufferString.equals("SHOULD_WRITE")) { + ctx.writeAndFlush("Failed"); + } else { + throw new IllegalArgumentException("Only accept SHOULD_WRITE message"); + } + } + } + + private class CapitalizeWriteHandler extends ChannelOutboundHandlerAdapter { + + @Override + public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception { + String string = (String) msg; + assert string.equals("Failed") : "Should be the same was what we wrote."; + super.write(ctx, Unpooled.wrappedBuffer(string.toUpperCase(Locale.ROOT).getBytes(StandardCharsets.UTF_8)), promise); + } + } + + private class PromiseCheckerHandler extends ChannelOutboundHandlerAdapter { + + private AtomicBoolean isCalled = new AtomicBoolean(false); + + @Override + public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception { + promise.addListener((f) -> isCalled.set(true)); + super.write(ctx, msg, promise); + } + } + + private class CloseChannelHandler extends ChannelInboundHandlerAdapter { + + @Override + public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { + ByteBuf buffer = (ByteBuf) msg; + String bufferString = buffer.toString(StandardCharsets.UTF_8); + if (bufferString.equals("SHOULD_CLOSE")) { + ctx.close(); + } else { + throw new IllegalArgumentException("Only accept SHOULD_CLOSE message"); + } + } + } +} diff --git a/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java 
b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java new file mode 100644 index 0000000000000..4741bd69a527a --- /dev/null +++ b/plugins/transport-nio/src/test/java/org/elasticsearch/http/nio/NioHttpServerTransportTests.java @@ -0,0 +1,353 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.http.nio; + +import io.netty.buffer.ByteBufUtil; +import io.netty.buffer.Unpooled; +import io.netty.handler.codec.TooLongFrameException; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaderValues; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpUtil; +import io.netty.handler.codec.http.HttpVersion; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.MockBigArrays; +import org.elasticsearch.common.util.MockPageCacheRecycler; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.http.BindHttpException; +import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.http.HttpTransportSettings; +import org.elasticsearch.http.NullDispatcher; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.rest.BytesRestResponse; +import org.elasticsearch.rest.RestChannel; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.concurrent.atomic.AtomicReference; + +import static org.elasticsearch.rest.RestStatus.BAD_REQUEST; +import static org.elasticsearch.rest.RestStatus.OK; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; + +/** + * Tests for the {@link NioHttpServerTransport} class. 
+ */ +public class NioHttpServerTransportTests extends ESTestCase { + + private NetworkService networkService; + private ThreadPool threadPool; + private MockBigArrays bigArrays; + + @Before + public void setup() throws Exception { + networkService = new NetworkService(Collections.emptyList()); + threadPool = new TestThreadPool("test"); + bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()); + } + + @After + public void shutdown() throws Exception { + if (threadPool != null) { + threadPool.shutdownNow(); + } + threadPool = null; + networkService = null; + bigArrays = null; + } + +// public void testCorsConfig() { +// final Set methods = new HashSet<>(Arrays.asList("get", "options", "post")); +// final Set headers = new HashSet<>(Arrays.asList("Content-Type", "Content-Length")); +// final String prefix = randomBoolean() ? " " : ""; // sometimes have a leading whitespace between comma delimited elements +// final Settings settings = Settings.builder() +// .put(SETTING_CORS_ENABLED.getKey(), true) +// .put(SETTING_CORS_ALLOW_ORIGIN.getKey(), "*") +// .put(SETTING_CORS_ALLOW_METHODS.getKey(), collectionToDelimitedString(methods, ",", prefix, "")) +// .put(SETTING_CORS_ALLOW_HEADERS.getKey(), collectionToDelimitedString(headers, ",", prefix, "")) +// .put(SETTING_CORS_ALLOW_CREDENTIALS.getKey(), true) +// .build(); +// final Netty4CorsConfig corsConfig = Netty4HttpServerTransport.buildCorsConfig(settings); +// assertTrue(corsConfig.isAnyOriginSupported()); +// assertEquals(headers, corsConfig.allowedRequestHeaders()); +// assertEquals(methods, corsConfig.allowedRequestMethods().stream().map(HttpMethod::name).collect(Collectors.toSet())); +// } + +// public void testCorsConfigWithDefaults() { +// final Set methods = Strings.commaDelimitedListToSet(SETTING_CORS_ALLOW_METHODS.getDefault(Settings.EMPTY)); +// final Set headers = Strings.commaDelimitedListToSet(SETTING_CORS_ALLOW_HEADERS.getDefault(Settings.EMPTY)); +// final long maxAge = SETTING_CORS_MAX_AGE.getDefault(Settings.EMPTY); +// final Settings settings = Settings.builder().put(SETTING_CORS_ENABLED.getKey(), true).build(); +// final Netty4CorsConfig corsConfig = Netty4HttpServerTransport.buildCorsConfig(settings); +// assertFalse(corsConfig.isAnyOriginSupported()); +// assertEquals(Collections.emptySet(), corsConfig.origins().get()); +// assertEquals(headers, corsConfig.allowedRequestHeaders()); +// assertEquals(methods, corsConfig.allowedRequestMethods().stream().map(HttpMethod::name).collect(Collectors.toSet())); +// assertEquals(maxAge, corsConfig.maxAge()); +// assertFalse(corsConfig.isCredentialsAllowed()); +// } + + /** + * Test that {@link NioHttpServerTransport} supports the "Expect: 100-continue" HTTP header + * @throws InterruptedException if the client communication with the server is interrupted + */ + public void testExpectContinueHeader() throws InterruptedException { + final Settings settings = Settings.EMPTY; + final int contentLength = randomIntBetween(1, HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.get(settings).bytesAsInt()); + runExpectHeaderTest(settings, HttpHeaderValues.CONTINUE.toString(), contentLength, HttpResponseStatus.CONTINUE); + } + + /** + * Test that {@link NioHttpServerTransport} responds to a + * 100-continue expectation with too large a content-length + * with a 413 status. 
+ * @throws InterruptedException if the client communication with the server is interrupted + */ + public void testExpectContinueHeaderContentLengthTooLong() throws InterruptedException { + final String key = HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.getKey(); + final int maxContentLength = randomIntBetween(1, 104857600); + final Settings settings = Settings.builder().put(key, maxContentLength + "b").build(); + final int contentLength = randomIntBetween(maxContentLength + 1, Integer.MAX_VALUE); + runExpectHeaderTest( + settings, HttpHeaderValues.CONTINUE.toString(), contentLength, HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE); + } + + /** + * Test that {@link NioHttpServerTransport} responds to an unsupported expectation with a 417 status. + * @throws InterruptedException if the client communication with the server is interrupted + */ + public void testExpectUnsupportedExpectation() throws InterruptedException { + runExpectHeaderTest(Settings.EMPTY, "chocolate=yummy", 0, HttpResponseStatus.EXPECTATION_FAILED); + } + + private void runExpectHeaderTest( + final Settings settings, + final String expectation, + final int contentLength, + final HttpResponseStatus expectedStatus) throws InterruptedException { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + @Override + public void dispatchRequest(RestRequest request, RestChannel channel, ThreadContext threadContext) { + channel.sendResponse(new BytesRestResponse(OK, BytesRestResponse.TEXT_CONTENT_TYPE, new BytesArray("done"))); + } + + @Override + public void dispatchBadRequest(RestRequest request, RestChannel channel, ThreadContext threadContext, Throwable cause) { + throw new AssertionError(); + } + }; + try (NioHttpServerTransport transport = new NioHttpServerTransport(settings, networkService, bigArrays, threadPool, + xContentRegistry(), dispatcher)) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + try (Netty4HttpClient client = new Netty4HttpClient()) { + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/"); + request.headers().set(HttpHeaderNames.EXPECT, expectation); + HttpUtil.setContentLength(request, contentLength); + + final FullHttpResponse response = client.post(remoteAddress.address(), request); + assertThat(response.status(), equalTo(expectedStatus)); + if (expectedStatus.equals(HttpResponseStatus.CONTINUE)) { + final FullHttpRequest continuationRequest = + new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/", Unpooled.EMPTY_BUFFER); + final FullHttpResponse continuationResponse = client.post(remoteAddress.address(), continuationRequest); + + assertThat(continuationResponse.status(), is(HttpResponseStatus.OK)); + assertThat(new String(ByteBufUtil.getBytes(continuationResponse.content()), StandardCharsets.UTF_8), is("done")); + } + } + } + } + + public void testBindUnavailableAddress() { + try (NioHttpServerTransport transport = new NioHttpServerTransport(Settings.EMPTY, networkService, bigArrays, threadPool, + xContentRegistry(), new NullDispatcher())) { + transport.start(); + TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + Settings settings = Settings.builder().put("http.port", remoteAddress.getPort()).build(); + try (NioHttpServerTransport otherTransport = new NioHttpServerTransport(settings, networkService, bigArrays, threadPool, + xContentRegistry(), new NullDispatcher())) { + BindHttpException 
bindHttpException = expectThrows(BindHttpException.class, () -> otherTransport.start()); + assertEquals("Failed to bind to [" + remoteAddress.getPort() + "]", bindHttpException.getMessage()); + } + } + } + + public void testBadRequest() throws InterruptedException { + final AtomicReference causeReference = new AtomicReference<>(); + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + + @Override + public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { + throw new AssertionError(); + } + + @Override + public void dispatchBadRequest(final RestRequest request, + final RestChannel channel, + final ThreadContext threadContext, + final Throwable cause) { + causeReference.set(cause); + try { + final ElasticsearchException e = new ElasticsearchException("you sent a bad request and you should feel bad"); + channel.sendResponse(new BytesRestResponse(channel, BAD_REQUEST, e)); + } catch (final IOException e) { + throw new AssertionError(e); + } + } + + }; + + final Settings settings; + final int maxInitialLineLength; + final Setting httpMaxInitialLineLengthSetting = HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH; + if (randomBoolean()) { + maxInitialLineLength = httpMaxInitialLineLengthSetting.getDefault(Settings.EMPTY).bytesAsInt(); + settings = Settings.EMPTY; + } else { + maxInitialLineLength = randomIntBetween(1, 8192); + settings = Settings.builder().put(httpMaxInitialLineLengthSetting.getKey(), maxInitialLineLength + "b").build(); + } + + try (NioHttpServerTransport transport = + new NioHttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry(), dispatcher)) { + transport.start(); + final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); + + try (Netty4HttpClient client = new Netty4HttpClient()) { + final String url = "/" + new String(new byte[maxInitialLineLength], Charset.forName("UTF-8")); + final FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, url); + + final FullHttpResponse response = client.post(remoteAddress.address(), request); + assertThat(response.status(), equalTo(HttpResponseStatus.BAD_REQUEST)); + assertThat( + new String(response.content().array(), Charset.forName("UTF-8")), + containsString("you sent a bad request and you should feel bad")); + } + } + + assertNotNull(causeReference.get()); + assertThat(causeReference.get(), instanceOf(TooLongFrameException.class)); + } + + public void testDispatchDoesNotModifyThreadContext() throws InterruptedException { + final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { + + @Override + public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { + threadContext.putHeader("foo", "bar"); + threadContext.putTransient("bar", "baz"); + } + + @Override + public void dispatchBadRequest(final RestRequest request, + final RestChannel channel, + final ThreadContext threadContext, + final Throwable cause) { + threadContext.putHeader("foo_bad", "bar"); + threadContext.putTransient("bar_bad", "baz"); + } + + }; + + try (NioHttpServerTransport transport = + new NioHttpServerTransport(Settings.EMPTY, networkService, bigArrays, threadPool, xContentRegistry(), dispatcher)) { + transport.start(); + + transport.dispatchRequest(null, null); + assertNull(threadPool.getThreadContext().getHeader("foo")); + 
assertNull(threadPool.getThreadContext().getTransient("bar")); + + transport.dispatchBadRequest(null, null, null); + assertNull(threadPool.getThreadContext().getHeader("foo_bad")); + assertNull(threadPool.getThreadContext().getTransient("bar_bad")); + } + } + +// public void testReadTimeout() throws Exception { +// final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() { +// +// @Override +// public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) { +// throw new AssertionError("Should not have received a dispatched request"); +// } +// +// @Override +// public void dispatchBadRequest(final RestRequest request, +// final RestChannel channel, +// final ThreadContext threadContext, +// final Throwable cause) { +// throw new AssertionError("Should not have received a dispatched request"); +// } +// +// }; +// +// Settings settings = Settings.builder() +// .put(HttpTransportSettings.SETTING_HTTP_READ_TIMEOUT.getKey(), new TimeValue(randomIntBetween(100, 300))) +// .build(); +// +// +// NioEventLoopGroup group = new NioEventLoopGroup(); +// try (NioHttpServerTransport transport = +// new NioHttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry(), dispatcher)) { +// transport.start(); +// final TransportAddress remoteAddress = randomFrom(transport.boundAddress().boundAddresses()); +// +// AtomicBoolean channelClosed = new AtomicBoolean(false); +// +// Bootstrap clientBootstrap = new Bootstrap().channel(NioSocketChannel.class).handler(new ChannelInitializer() { +// +// @Override +// protected void initChannel(SocketChannel ch) { +// ch.pipeline().addLast(new ChannelHandlerAdapter() {}); +// +// } +// }).group(group); +// ChannelFuture connect = clientBootstrap.connect(remoteAddress.address()); +// connect.channel().closeFuture().addListener(future -> channelClosed.set(true)); +// +// assertBusy(() -> assertTrue("Channel should be closed due to read timeout", channelClosed.get()), 5, TimeUnit.SECONDS); +// +// } finally { +// group.shutdownGracefully().await(); +// } +// } +} diff --git a/qa/build.gradle b/qa/build.gradle index 494f6e3cd94b7..709c309359ecf 100644 --- a/qa/build.gradle +++ b/qa/build.gradle @@ -4,7 +4,7 @@ import org.elasticsearch.gradle.test.RestIntegTestTask subprojects { Project subproj -> subproj.tasks.withType(RestIntegTestTask) { subproj.extensions.configure("${it.name}Cluster") { cluster -> - cluster.distribution = 'oss-zip' + cluster.distribution = System.getProperty('tests.distribution', 'oss-zip') } } } diff --git a/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java b/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java index c373adb5d743d..f7b87905b24d5 100644 --- a/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java +++ b/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java @@ -35,6 +35,7 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchScrollRequest; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; @@ -134,7 +135,7 @@ public void testSearchSkipUnavailable() throws IOException { for 
(int i = 0; i < 10; i++) { restHighLevelClient.index(new IndexRequest("index", "doc", String.valueOf(i)).source("field", "value")); } - Response refreshResponse = client().performRequest("POST", "/index/_refresh"); + Response refreshResponse = client().performRequest(new Request("POST", "/index/_refresh")); assertEquals(200, refreshResponse.getStatusLine().getStatusCode()); { @@ -223,10 +224,11 @@ public void testSkipUnavailableDependsOnSeeds() throws IOException { { //check that skip_unavailable alone cannot be set - HttpEntity clusterSettingsEntity = buildUpdateSettingsRequestBody( - Collections.singletonMap("skip_unavailable", randomBoolean())); + Request request = new Request("PUT", "/_cluster/settings"); + request.setEntity(buildUpdateSettingsRequestBody( + Collections.singletonMap("skip_unavailable", randomBoolean()))); ResponseException responseException = expectThrows(ResponseException.class, - () -> client().performRequest("PUT", "/_cluster/settings", Collections.emptyMap(), clusterSettingsEntity)); + () -> client().performRequest(request)); assertEquals(400, responseException.getResponse().getStatusLine().getStatusCode()); assertThat(responseException.getMessage(), containsString("Missing required setting [search.remote.remote1.seeds] " + @@ -240,9 +242,10 @@ public void testSkipUnavailableDependsOnSeeds() throws IOException { { //check that seeds cannot be reset alone if skip_unavailable is set - HttpEntity clusterSettingsEntity = buildUpdateSettingsRequestBody(Collections.singletonMap("seeds", null)); + Request request = new Request("PUT", "/_cluster/settings"); + request.setEntity(buildUpdateSettingsRequestBody(Collections.singletonMap("seeds", null))); ResponseException responseException = expectThrows(ResponseException.class, - () -> client().performRequest("PUT", "/_cluster/settings", Collections.emptyMap(), clusterSettingsEntity)); + () -> client().performRequest(request)); assertEquals(400, responseException.getResponse().getStatusLine().getStatusCode()); assertThat(responseException.getMessage(), containsString("Missing required setting [search.remote.remote1.seeds] " + "for setting [search.remote.remote1.skip_unavailable]")); @@ -284,8 +287,9 @@ private static void assertSearchConnectFailure() { private static void updateRemoteClusterSettings(Map settings) throws IOException { - HttpEntity clusterSettingsEntity = buildUpdateSettingsRequestBody(settings); - Response response = client().performRequest("PUT", "/_cluster/settings", Collections.emptyMap(), clusterSettingsEntity); + Request request = new Request("PUT", "/_cluster/settings"); + request.setEntity(buildUpdateSettingsRequestBody(settings)); + Response response = client().performRequest(request); assertEquals(200, response.getStatusLine().getStatusCode()); } diff --git a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java index 4e69a478562a7..992d3ce71f623 100644 --- a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java +++ b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java @@ -21,6 +21,7 @@ import org.apache.http.ConnectionClosedException; import org.apache.lucene.util.Constants; +import org.elasticsearch.client.Request; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.test.rest.ESRestTestCase; import org.hamcrest.Matcher; @@ -51,7 +52,8 @@ public void 
testDieWithDignity() throws Exception { assertThat(pidFileLines, hasSize(1)); final int pid = Integer.parseInt(pidFileLines.get(0)); Files.delete(pidFile); - IOException e = expectThrows(IOException.class, () -> client().performRequest("GET", "/_die_with_dignity")); + IOException e = expectThrows(IOException.class, + () -> client().performRequest(new Request("GET", "/_die_with_dignity"))); Matcher failureMatcher = instanceOf(ConnectionClosedException.class); if (Constants.WINDOWS) { /* diff --git a/qa/mixed-cluster/build.gradle b/qa/mixed-cluster/build.gradle index d335ac982fd8a..da99bbb4c8036 100644 --- a/qa/mixed-cluster/build.gradle +++ b/qa/mixed-cluster/build.gradle @@ -54,9 +54,16 @@ for (Version version : bwcVersions.wireCompatible) { bwcTest.dependsOn(versionBwcTest) } - /* To support taking index snapshots, we have to set path.repo setting */ tasks.getByName("${baseName}#mixedClusterTestRunner").configure { + /* To support taking index snapshots, we have to set path.repo setting */ systemProperty 'tests.path.repo', new File(buildDir, "cluster/shared/repo") + if ('zip'.equals(extension.distribution)) { + systemProperty 'tests.rest.blacklist', [ + 'cat.templates/10_basic/No templates', + 'cat.templates/10_basic/Sort templates', + 'cat.templates/10_basic/Multiple template', + ].join(',') + } } } diff --git a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java index a38ff284a1a05..22a3fa65eece0 100644 --- a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java +++ b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java @@ -19,9 +19,8 @@ package org.elasticsearch.backwards; import org.apache.http.HttpHost; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; import org.elasticsearch.Version; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -34,25 +33,21 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.stream.Collectors; -import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiOfLength; -import static java.util.Collections.emptyMap; -import static java.util.Collections.singletonMap; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.not; public class IndexingIT extends ESRestTestCase { private int indexDocs(String index, final int idStart, final int numDocs) throws IOException { for (int i = 0; i < numDocs; i++) { final int id = idStart + i; - assertOK(client().performRequest("PUT", index + "/test/" + id, emptyMap(), - new StringEntity("{\"test\": \"test_" + randomAsciiOfLength(2) + "\"}", ContentType.APPLICATION_JSON))); + Request request = new Request("PUT", index + "/test/" + id); + request.setJsonEntity("{\"test\": \"test_" + randomAlphaOfLength(2) + "\"}"); + assertOK(client().performRequest(request)); } return numDocs; } @@ -105,7 +100,7 @@ public void testIndexVersionPropagation() throws Exception { logger.info("allowing shards on all nodes"); updateIndexSettings(index, Settings.builder().putNull("index.routing.allocation.include._name")); ensureGreen(index); - assertOK(client().performRequest("POST", index + "/_refresh")); + assertOK(client().performRequest(new Request("POST", index + 
"/_refresh"))); List shards = buildShards(index, nodes, newNodeClient); Shard primary = buildShards(index, nodes, newNodeClient).stream().filter(Shard::isPrimary).findFirst().get(); logger.info("primary resolved to: " + primary.getNode().getNodeName()); @@ -117,7 +112,7 @@ public void testIndexVersionPropagation() throws Exception { nUpdates = randomIntBetween(minUpdates, maxUpdates); logger.info("indexing docs with [{}] concurrent updates after allowing shards on all nodes", nUpdates); final int finalVersionForDoc2 = indexDocWithConcurrentUpdates(index, 2, nUpdates); - assertOK(client().performRequest("POST", index + "/_refresh")); + assertOK(client().performRequest(new Request("POST", index + "/_refresh"))); shards = buildShards(index, nodes, newNodeClient); primary = shards.stream().filter(Shard::isPrimary).findFirst().get(); logger.info("primary resolved to: " + primary.getNode().getNodeName()); @@ -133,7 +128,7 @@ public void testIndexVersionPropagation() throws Exception { nUpdates = randomIntBetween(minUpdates, maxUpdates); logger.info("indexing docs with [{}] concurrent updates after moving primary", nUpdates); final int finalVersionForDoc3 = indexDocWithConcurrentUpdates(index, 3, nUpdates); - assertOK(client().performRequest("POST", index + "/_refresh")); + assertOK(client().performRequest(new Request("POST", index + "/_refresh"))); shards = buildShards(index, nodes, newNodeClient); for (Shard shard : shards) { assertVersion(index, 3, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc3); @@ -146,7 +141,7 @@ public void testIndexVersionPropagation() throws Exception { nUpdates = randomIntBetween(minUpdates, maxUpdates); logger.info("indexing doc with [{}] concurrent updates after setting number of replicas to 0", nUpdates); final int finalVersionForDoc4 = indexDocWithConcurrentUpdates(index, 4, nUpdates); - assertOK(client().performRequest("POST", index + "/_refresh")); + assertOK(client().performRequest(new Request("POST", index + "/_refresh"))); shards = buildShards(index, nodes, newNodeClient); for (Shard shard : shards) { assertVersion(index, 4, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc4); @@ -159,7 +154,7 @@ public void testIndexVersionPropagation() throws Exception { nUpdates = randomIntBetween(minUpdates, maxUpdates); logger.info("indexing doc with [{}] concurrent updates after setting number of replicas to 1", nUpdates); final int finalVersionForDoc5 = indexDocWithConcurrentUpdates(index, 5, nUpdates); - assertOK(client().performRequest("POST", index + "/_refresh")); + assertOK(client().performRequest(new Request("POST", index + "/_refresh"))); shards = buildShards(index, nodes, newNodeClient); for (Shard shard : shards) { assertVersion(index, 5, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc5); @@ -191,7 +186,7 @@ public void testSeqNoCheckpoints() throws Exception { logger.info("allowing shards on all nodes"); updateIndexSettings(index, Settings.builder().putNull("index.routing.allocation.include._name")); ensureGreen(index); - assertOK(client().performRequest("POST", index + "/_refresh")); + assertOK(client().performRequest(new Request("POST", index + "/_refresh"))); for (final String bwcName : bwcNamesList) { assertCount(index, "_only_nodes:" + bwcName, numDocs); } @@ -222,7 +217,7 @@ public void testSeqNoCheckpoints() throws Exception { logger.info("setting number of replicas to 1"); updateIndexSettings(index, Settings.builder().put("index.number_of_replicas", 1)); ensureGreen(index); - 
assertOK(client().performRequest("POST", index + "/_refresh")); + assertOK(client().performRequest(new Request("POST", index + "/_refresh"))); for (Shard shard : buildShards(index, nodes, newNodeClient)) { assertCount(index, "_only_nodes:" + shard.node.nodeName, numDocs); @@ -237,20 +232,18 @@ public void testUpdateSnapshotStatus() throws Exception { logger.info("cluster discovered: {}", nodes.toString()); // Create the repository before taking the snapshot. - String repoConfig = Strings + Request request = new Request("PUT", "/_snapshot/repo"); + request.setJsonEntity(Strings .toString(JsonXContent.contentBuilder() .startObject() - .field("type", "fs") - .startObject("settings") - .field("compress", randomBoolean()) - .field("location", System.getProperty("tests.path.repo")) - .endObject() - .endObject()); - - assertOK( - client().performRequest("PUT", "/_snapshot/repo", emptyMap(), - new StringEntity(repoConfig, ContentType.APPLICATION_JSON)) - ); + .field("type", "fs") + .startObject("settings") + .field("compress", randomBoolean()) + .field("location", System.getProperty("tests.path.repo")) + .endObject() + .endObject())); + + assertOK(client().performRequest(request)); String bwcNames = nodes.getBWCNodes().stream().map(Node::getNodeName).collect(Collectors.joining(",")); @@ -264,34 +257,36 @@ public void testUpdateSnapshotStatus() throws Exception { createIndex(index, settings.build()); indexDocs(index, 0, between(50, 100)); ensureGreen(index); - assertOK(client().performRequest("POST", index + "/_refresh")); + assertOK(client().performRequest(new Request("POST", index + "/_refresh"))); - assertOK( - client().performRequest("PUT", "/_snapshot/repo/bwc-snapshot", singletonMap("wait_for_completion", "true"), - new StringEntity("{\"indices\": \"" + index + "\"}", ContentType.APPLICATION_JSON)) - ); + request = new Request("PUT", "/_snapshot/repo/bwc-snapshot"); + request.addParameter("wait_for_completion", "true"); + request.setJsonEntity("{\"indices\": \"" + index + "\"}"); + assertOK(client().performRequest(request)); // Allocating shards on all nodes, taking snapshots should happen on all nodes. 
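//
// ---- Editor's aside (illustrative sketch, not part of the diff) ------------
// The qa test changes in this patch all apply the same migration: the old
// RestClient#performRequest(String method, String endpoint, Map params,
// HttpEntity entity, Header...) overloads are replaced by the low-level REST
// client's Request object, as the hunks above and below show. A minimal
// sketch of the new pattern, assuming an existing RestClient named `client`
// and a made-up index name (all method names are taken from the diff itself):
//
//     import org.elasticsearch.client.Request;
//     import org.elasticsearch.client.Response;
//
//     Request request = new Request("GET", "/my-index/_count");
//     request.addParameter("preference", "_only_nodes:node-1"); // query params are added one by one
//     request.setJsonEntity("{\"query\": {\"match_all\": {}}}"); // JSON body set as a string
//     Response response = client.performRequest(request);       // single performRequest(Request) overload
//
// ---- End of aside; the diff resumes below with the shard allocation step
// described in the comment just above. ---------------------------------------
//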
updateIndexSettings(index, Settings.builder().putNull("index.routing.allocation.include._name")); ensureGreen(index); - assertOK(client().performRequest("POST", index + "/_refresh")); + assertOK(client().performRequest(new Request("POST", index + "/_refresh"))); - assertOK( - client().performRequest("PUT", "/_snapshot/repo/mixed-snapshot", singletonMap("wait_for_completion", "true"), - new StringEntity("{\"indices\": \"" + index + "\"}", ContentType.APPLICATION_JSON)) - ); + request = new Request("PUT", "/_snapshot/repo/mixed-snapshot"); + request.addParameter("wait_for_completion", "true"); + request.setJsonEntity("{\"indices\": \"" + index + "\"}"); } private void assertCount(final String index, final String preference, final int expectedCount) throws IOException { - final Response response = client().performRequest("GET", index + "/_count", Collections.singletonMap("preference", preference)); + Request request = new Request("GET", index + "/_count"); + request.addParameter("preference", preference); + final Response response = client().performRequest(request); assertOK(response); final int actualCount = Integer.parseInt(ObjectPath.createFromResponse(response).evaluate("count").toString()); assertThat(actualCount, equalTo(expectedCount)); } private void assertVersion(final String index, final int docId, final String preference, final int expectedVersion) throws IOException { - final Response response = client().performRequest("GET", index + "/test/" + docId, - Collections.singletonMap("preference", preference)); + Request request = new Request("GET", index + "/test/" + docId); + request.addParameter("preference", preference); + final Response response = client().performRequest(request); assertOK(response); final int actualVersion = Integer.parseInt(ObjectPath.createFromResponse(response).evaluate("_version").toString()); assertThat("version mismatch for doc [" + docId + "] preference [" + preference + "]", actualVersion, equalTo(expectedVersion)); @@ -323,7 +318,9 @@ private void assertSeqNoOnShards(String index, Nodes nodes, int numDocs, RestCli } private List buildShards(String index, Nodes nodes, RestClient client) throws IOException { - Response response = client.performRequest("GET", index + "/_stats", singletonMap("level", "shards")); + Request request = new Request("GET", index + "/_stats"); + request.addParameter("level", "shards"); + Response response = client.performRequest(request); List shardStats = ObjectPath.createFromResponse(response).evaluate("indices." + index + ".shards.0"); ArrayList shards = new ArrayList<>(); for (Object shard : shardStats) { @@ -341,7 +338,7 @@ private List buildShards(String index, Nodes nodes, RestClient client) th } private Nodes buildNodeAndVersions() throws IOException { - Response response = client().performRequest("GET", "_nodes"); + Response response = client().performRequest(new Request("GET", "_nodes")); ObjectPath objectPath = ObjectPath.createFromResponse(response); Map nodesAsMap = objectPath.evaluate("nodes"); Nodes nodes = new Nodes(); @@ -352,7 +349,7 @@ private Nodes buildNodeAndVersions() throws IOException { Version.fromString(objectPath.evaluate("nodes." + id + ".version")), HttpHost.create(objectPath.evaluate("nodes." 
+ id + ".http.publish_address")))); } - response = client().performRequest("GET", "_cluster/state"); + response = client().performRequest(new Request("GET", "_cluster/state")); nodes.setMasterNodeId(ObjectPath.createFromResponse(response).evaluate("master_node")); return nodes; } diff --git a/qa/query-builder-bwc/src/test/java/org/elasticsearch/bwc/QueryBuilderBWCIT.java b/qa/query-builder-bwc/src/test/java/org/elasticsearch/bwc/QueryBuilderBWCIT.java index f3e03f006c5aa..2d3f55ab94bb4 100644 --- a/qa/query-builder-bwc/src/test/java/org/elasticsearch/bwc/QueryBuilderBWCIT.java +++ b/qa/query-builder-bwc/src/test/java/org/elasticsearch/bwc/QueryBuilderBWCIT.java @@ -19,10 +19,9 @@ package org.elasticsearch.bwc; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; import org.apache.http.util.EntityUtils; import org.elasticsearch.Version; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; @@ -189,13 +188,15 @@ public void testQueryBuilderBWC() throws Exception { mappingsAndSettings.endObject(); } mappingsAndSettings.endObject(); - Response rsp = client().performRequest("PUT", "/" + index, Collections.emptyMap(), - new StringEntity(Strings.toString(mappingsAndSettings), ContentType.APPLICATION_JSON)); + Request request = new Request("PUT", "/" + index); + request.setJsonEntity(Strings.toString(mappingsAndSettings)); + Response rsp = client().performRequest(request); assertEquals(200, rsp.getStatusLine().getStatusCode()); for (int i = 0; i < CANDIDATES.size(); i++) { - rsp = client().performRequest("PUT", "/" + index + "/doc/" + Integer.toString(i), Collections.emptyMap(), - new StringEntity((String) CANDIDATES.get(i)[0], ContentType.APPLICATION_JSON)); + request = new Request("PUT", "/" + index + "/doc/" + Integer.toString(i)); + request.setJsonEntity((String) CANDIDATES.get(i)[0]); + rsp = client().performRequest(request); assertEquals(201, rsp.getStatusLine().getStatusCode()); } } else { @@ -204,9 +205,10 @@ public void testQueryBuilderBWC() throws Exception { for (int i = 0; i < CANDIDATES.size(); i++) { QueryBuilder expectedQueryBuilder = (QueryBuilder) CANDIDATES.get(i)[1]; - Response rsp = client().performRequest("GET", "/" + index + "/_search", Collections.emptyMap(), - new StringEntity("{\"query\": {\"ids\": {\"values\": [\"" + Integer.toString(i) + "\"]}}, " + - "\"docvalue_fields\" : [\"query.query_builder_field\"]}", ContentType.APPLICATION_JSON)); + Request request = new Request("GET", "/" + index + "/_search"); + request.setJsonEntity("{\"query\": {\"ids\": {\"values\": [\"" + Integer.toString(i) + "\"]}}, " + + "\"docvalue_fields\" : [\"query.query_builder_field\"]}"); + Response rsp = client().performRequest(request); assertEquals(200, rsp.getStatusLine().getStatusCode()); Map hitRsp = (Map) ((List) ((Map)toMap(rsp).get("hits")).get("hits")).get(0); String queryBuilderStr = (String) ((List) ((Map) hitRsp.get("fields")).get("query.query_builder_field")).get(0); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java index 965f94607aebb..bfa856e381b12 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java @@ -31,11 +31,11 @@ import 
org.elasticsearch.action.support.ActionFilter; import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; import org.elasticsearch.client.Client; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentType; @@ -221,8 +221,10 @@ public void testThatMoreLikeThisQueryMultiTermVectorRequestContainsContextAndHea public void testThatRelevantHttpHeadersBecomeRequestHeaders() throws IOException { final String IRRELEVANT_HEADER = "SomeIrrelevantHeader"; - Response response = getRestClient().performRequest("GET", "/" + queryIndex + "/_search", - new BasicHeader(CUSTOM_HEADER, randomHeaderValue), new BasicHeader(IRRELEVANT_HEADER, randomHeaderValue)); + Request request = new Request("GET", "/" + queryIndex + "/_search"); + request.setHeaders(new BasicHeader(CUSTOM_HEADER, randomHeaderValue), + new BasicHeader(IRRELEVANT_HEADER, randomHeaderValue)); + Response response = getRestClient().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); List searchRequests = getRequests(SearchRequest.class); assertThat(searchRequests, hasSize(greaterThan(0))); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsNotSetIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsNotSetIT.java index bdda44c1b7118..4ab64abda453b 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsNotSetIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsNotSetIT.java @@ -20,8 +20,8 @@ package org.elasticsearch.http; import org.apache.http.message.BasicHeader; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; -import org.elasticsearch.test.ESIntegTestCase; import java.io.IOException; @@ -32,15 +32,16 @@ public class CorsNotSetIT extends HttpSmokeTestCase { public void testCorsSettingDefaultBehaviourDoesNotReturnAnything() throws IOException { String corsValue = "http://localhost:9200"; - Response response = getRestClient().performRequest("GET", "/", - new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue)); + Request request = new Request("GET", "/"); + request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue)); + Response response = getRestClient().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), is(200)); assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue()); assertThat(response.getHeader("Access-Control-Allow-Credentials"), nullValue()); } public void testThatOmittingCorsHeaderDoesNotReturnAnything() throws IOException { - Response response = getRestClient().performRequest("GET", "/"); + Response response = getRestClient().performRequest(new Request("GET", "/")); assertThat(response.getStatusLine().getStatusCode(), is(200)); assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue()); assertThat(response.getHeader("Access-Control-Allow-Credentials"), nullValue()); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsRegexIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsRegexIT.java index 
47215ae669b31..da48e51b63bbe 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsRegexIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsRegexIT.java @@ -19,9 +19,9 @@ package org.elasticsearch.http; import org.apache.http.message.BasicHeader; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; -import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; @@ -54,21 +54,26 @@ protected Settings nodeSettings(int nodeOrdinal) { public void testThatRegularExpressionWorksOnMatch() throws IOException { String corsValue = "http://localhost:9200"; - Response response = getRestClient().performRequest("GET", "/", - new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue)); + Request request = new Request("GET", "/"); + request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), + new BasicHeader("Origin", corsValue)); + Response response = getRestClient().performRequest(request); assertResponseWithOriginheader(response, corsValue); - corsValue = "https://localhost:9200"; - response = getRestClient().performRequest("GET", "/", - new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue)); + corsValue = "https://localhost:9201"; + request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), + new BasicHeader("Origin", corsValue)); + response = getRestClient().performRequest(request); assertResponseWithOriginheader(response, corsValue); assertThat(response.getHeader("Access-Control-Allow-Credentials"), is("true")); } public void testThatRegularExpressionReturnsForbiddenOnNonMatch() throws IOException { + Request request = new Request("GET", "/"); + request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), + new BasicHeader("Origin", "http://evil-host:9200")); try { - getRestClient().performRequest("GET", "/", new BasicHeader("User-Agent", "Mozilla Bar"), - new BasicHeader("Origin", "http://evil-host:9200")); + getRestClient().performRequest(request); fail("request should have failed"); } catch(ResponseException e) { Response response = e.getResponse(); @@ -79,31 +84,38 @@ public void testThatRegularExpressionReturnsForbiddenOnNonMatch() throws IOExcep } public void testThatSendingNoOriginHeaderReturnsNoAccessControlHeader() throws IOException { - Response response = getRestClient().performRequest("GET", "/", new BasicHeader("User-Agent", "Mozilla Bar")); + Request request = new Request("GET", "/"); + request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar")); + Response response = getRestClient().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), is(200)); assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue()); } public void testThatRegularExpressionIsNotAppliedWithoutCorrectBrowserOnMatch() throws IOException { - Response response = getRestClient().performRequest("GET", "/"); + Response response = getRestClient().performRequest(new Request("GET", "/")); assertThat(response.getStatusLine().getStatusCode(), is(200)); assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue()); } public void testThatPreFlightRequestWorksOnMatch() throws IOException { String corsValue = "http://localhost:9200"; - Response response = getRestClient().performRequest("OPTIONS", "/", - new BasicHeader("User-Agent", 
"Mozilla Bar"), new BasicHeader("Origin", corsValue), + Request request = new Request("OPTIONS", "/"); + request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), + new BasicHeader("Origin", corsValue), new BasicHeader("Access-Control-Request-Method", "GET")); + Response response = getRestClient().performRequest(request); assertResponseWithOriginheader(response, corsValue); assertNotNull(response.getHeader("Access-Control-Allow-Methods")); } public void testThatPreFlightRequestReturnsNullOnNonMatch() throws IOException { + String corsValue = "http://evil-host:9200"; + Request request = new Request("OPTIONS", "/"); + request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), + new BasicHeader("Origin", corsValue), + new BasicHeader("Access-Control-Request-Method", "GET")); try { - getRestClient().performRequest("OPTIONS", "/", new BasicHeader("User-Agent", "Mozilla Bar"), - new BasicHeader("Origin", "http://evil-host:9200"), - new BasicHeader("Access-Control-Request-Method", "GET")); + getRestClient().performRequest(request); fail("request should have failed"); } catch(ResponseException e) { Response response = e.getResponse(); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DeprecationHttpIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DeprecationHttpIT.java index 260041fdbda91..bacf5495ab7ae 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DeprecationHttpIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DeprecationHttpIT.java @@ -22,6 +22,7 @@ import org.apache.http.HttpEntity; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.DeprecationLogger; @@ -106,11 +107,10 @@ public void testUniqueDeprecationResponsesMergedTogether() throws IOException { final String commaSeparatedIndices = Stream.of(indices).collect(Collectors.joining(",")); - final String body = "{\"query\":{\"bool\":{\"filter\":[{\"" + TestDeprecatedQueryBuilder.NAME + "\":{}}]}}}"; - // trigger all index deprecations - Response response = getRestClient().performRequest("GET", "/" + commaSeparatedIndices + "/_search", - Collections.emptyMap(), new StringEntity(body, ContentType.APPLICATION_JSON)); + Request request = new Request("GET", "/" + commaSeparatedIndices + "/_search"); + request.setJsonEntity("{\"query\":{\"bool\":{\"filter\":[{\"" + TestDeprecatedQueryBuilder.NAME + "\":{}}]}}}"); + Response response = getRestClient().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), equalTo(OK.getStatus())); final List deprecatedWarnings = getWarningHeaders(response.getHeaders()); @@ -162,8 +162,9 @@ private void doTestDeprecationWarningsAppearInHeaders() throws IOException { Collections.shuffle(settings, random()); // trigger all deprecations - Response response = getRestClient().performRequest("GET", "/_test_cluster/deprecated_settings", - Collections.emptyMap(), buildSettingsRequest(settings, useDeprecatedField)); + Request request = new Request("GET", "/_test_cluster/deprecated_settings"); + request.setEntity(buildSettingsRequest(settings, useDeprecatedField)); + Response response = getRestClient().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), equalTo(OK.getStatus())); final List deprecatedWarnings = getWarningHeaders(response.getHeaders()); diff --git 
a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DetailedErrorsDisabledIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DetailedErrorsDisabledIT.java index fa71822e79e57..6b2f49c583317 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DetailedErrorsDisabledIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DetailedErrorsDisabledIT.java @@ -20,12 +20,11 @@ package org.elasticsearch.http; import java.io.IOException; -import java.util.Collections; import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; -import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; @@ -49,8 +48,10 @@ protected Settings nodeSettings(int nodeOrdinal) { } public void testThatErrorTraceParamReturns400() throws IOException { + Request request = new Request("DELETE", "/"); + request.addParameter("error_trace", "true"); ResponseException e = expectThrows(ResponseException.class, () -> - getRestClient().performRequest("DELETE", "/", Collections.singletonMap("error_trace", "true"))); + getRestClient().performRequest(request)); Response response = e.getResponse(); assertThat(response.getHeader("Content-Type"), is("application/json; charset=UTF-8")); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DetailedErrorsEnabledIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DetailedErrorsEnabledIT.java index d0b80595a26ee..db37034973cf8 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DetailedErrorsEnabledIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DetailedErrorsEnabledIT.java @@ -20,11 +20,11 @@ package org.elasticsearch.http; import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import java.io.IOException; -import java.util.Collections; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.not; @@ -36,7 +36,9 @@ public class DetailedErrorsEnabledIT extends HttpSmokeTestCase { public void testThatErrorTraceWorksByDefault() throws IOException { try { - getRestClient().performRequest("DELETE", "/", Collections.singletonMap("error_trace", "true")); + Request request = new Request("DELETE", "/"); + request.addParameter("error_trace", "true"); + getRestClient().performRequest(request); fail("request should have failed"); } catch(ResponseException e) { Response response = e.getResponse(); @@ -47,7 +49,7 @@ public void testThatErrorTraceWorksByDefault() throws IOException { } try { - getRestClient().performRequest("DELETE", "/"); + getRestClient().performRequest(new Request("DELETE", "/")); fail("request should have failed"); } catch(ResponseException e) { Response response = e.getResponse(); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java index 20ddd0d230ad4..6af08577393d9 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java @@ -19,41 +19,40 @@ package org.elasticsearch.http; import org.apache.http.HttpHeaders; -import 
org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; import org.apache.http.message.BasicHeader; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; -import org.elasticsearch.client.RestClient; import org.elasticsearch.test.rest.ESRestTestCase; import java.io.IOException; -import java.util.Collections; public class HttpCompressionIT extends ESRestTestCase { private static final String GZIP_ENCODING = "gzip"; - private static final StringEntity SAMPLE_DOCUMENT = new StringEntity("{\n" + + private static final String SAMPLE_DOCUMENT = "{\n" + " \"name\": {\n" + " \"first name\": \"Steve\",\n" + " \"last name\": \"Jobs\"\n" + " }\n" + - "}", ContentType.APPLICATION_JSON); + "}"; public void testCompressesResponseIfRequested() throws IOException { - RestClient client = client(); - Response response = client.performRequest("GET", "/", new BasicHeader(HttpHeaders.ACCEPT_ENCODING, GZIP_ENCODING)); + Request request = new Request("GET", "/"); + request.setHeaders(new BasicHeader(HttpHeaders.ACCEPT_ENCODING, GZIP_ENCODING)); + Response response = client().performRequest(request); assertEquals(200, response.getStatusLine().getStatusCode()); assertEquals(GZIP_ENCODING, response.getHeader(HttpHeaders.CONTENT_ENCODING)); } public void testUncompressedResponseByDefault() throws IOException { - RestClient client = client(); - Response response = client.performRequest("GET", "/"); + Response response = client().performRequest(new Request("GET", "/")); assertEquals(200, response.getStatusLine().getStatusCode()); assertNull(response.getHeader(HttpHeaders.CONTENT_ENCODING)); - response = client.performRequest("POST", "/company/employees/1", Collections.emptyMap(), SAMPLE_DOCUMENT); + Request request = new Request("POST", "/company/employees/1"); + request.setJsonEntity(SAMPLE_DOCUMENT); + response = client().performRequest(request); assertEquals(201, response.getStatusLine().getStatusCode()); assertNull(response.getHeader(HttpHeaders.CONTENT_ENCODING)); } diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/NoHandlerIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/NoHandlerIT.java index 0a2d7ed9b06f2..e1d55afea1b54 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/NoHandlerIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/NoHandlerIT.java @@ -21,6 +21,7 @@ import org.apache.http.message.BasicHeader; import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; @@ -45,10 +46,10 @@ public void testNoHandlerRespectsAcceptHeader() throws IOException { private void runTestNoHandlerRespectsAcceptHeader( final String accept, final String contentType, final String expect) throws IOException { - final ResponseException e = - expectThrows( - ResponseException.class, - () -> getRestClient().performRequest("GET", "/foo/bar/baz/qux/quux", new BasicHeader("Accept", accept))); + Request request = new Request("GET", "/foo/bar/baz/qux/quux"); + request.setHeaders(new BasicHeader("Accept", accept)); + final ResponseException e = expectThrows(ResponseException.class, + () -> getRestClient().performRequest(request)); final Response response = e.getResponse(); assertThat(response.getHeader("Content-Type"), equalTo(contentType)); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ResponseHeaderPluginIT.java 
b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ResponseHeaderPluginIT.java index 7d413cca97703..b4dbc50d52db7 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ResponseHeaderPluginIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ResponseHeaderPluginIT.java @@ -19,9 +19,9 @@ package org.elasticsearch.http; import org.apache.http.message.BasicHeader; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; @@ -53,7 +53,7 @@ protected Collection> nodePlugins() { public void testThatSettingHeadersWorks() throws IOException { ensureGreen(); try { - getRestClient().performRequest("GET", "/_protected"); + getRestClient().performRequest(new Request("GET", "/_protected")); fail("request should have failed"); } catch(ResponseException e) { Response response = e.getResponse(); @@ -61,7 +61,9 @@ public void testThatSettingHeadersWorks() throws IOException { assertThat(response.getHeader("Secret"), equalTo("required")); } - Response authResponse = getRestClient().performRequest("GET", "/_protected", new BasicHeader("Secret", "password")); + Request request = new Request("GET", "/_protected"); + request.setHeaders(new BasicHeader("Secret", "password")); + Response authResponse = getRestClient().performRequest(request); assertThat(authResponse.getStatusLine().getStatusCode(), equalTo(200)); assertThat(authResponse.getHeader("Secret"), equalTo("granted")); } diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/RestHttpResponseHeadersIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/RestHttpResponseHeadersIT.java index c9e7dc451a053..901bffc9553d4 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/RestHttpResponseHeadersIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/RestHttpResponseHeadersIT.java @@ -18,6 +18,7 @@ package org.elasticsearch.http; import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.test.rest.ESRestTestCase; @@ -46,7 +47,7 @@ public class RestHttpResponseHeadersIT extends ESRestTestCase { * - Options). */ public void testValidEndpointOptionsResponseHttpHeader() throws Exception { - Response response = client().performRequest("OPTIONS", "/_tasks"); + Response response = client().performRequest(new Request("OPTIONS", "/_tasks")); assertThat(response.getStatusLine().getStatusCode(), is(200)); assertThat(response.getHeader("Allow"), notNullValue()); List responseAllowHeaderStringArray = @@ -64,7 +65,7 @@ public void testValidEndpointOptionsResponseHttpHeader() throws Exception { */ public void testUnsupportedMethodResponseHttpHeader() throws Exception { try { - client().performRequest("DELETE", "/_tasks"); + client().performRequest(new Request("DELETE", "/_tasks")); fail("Request should have failed with 405 error"); } catch (ResponseException e) { Response response = e.getResponse(); @@ -85,9 +86,9 @@ public void testUnsupportedMethodResponseHttpHeader() throws Exception { * 17853 for more information). 
*/ public void testIndexSettingsPostRequest() throws Exception { - client().performRequest("PUT", "/testindex"); + client().performRequest(new Request("PUT", "/testindex")); try { - client().performRequest("POST", "/testindex/_settings"); + client().performRequest(new Request("POST", "/testindex/_settings")); fail("Request should have failed with 405 error"); } catch (ResponseException e) { Response response = e.getResponse(); diff --git a/qa/smoke-test-multinode/build.gradle b/qa/smoke-test-multinode/build.gradle index 5df77bd0d9513..9d299e16f0210 100644 --- a/qa/smoke-test-multinode/build.gradle +++ b/qa/smoke-test-multinode/build.gradle @@ -27,3 +27,13 @@ integTest { integTestCluster { numNodes = 2 } + +integTestRunner { + if ('zip'.equals(integTestCluster.distribution)) { + systemProperty 'tests.rest.blacklist', [ + 'cat.templates/10_basic/No templates', + 'cat.templates/10_basic/Sort templates', + 'cat.templates/10_basic/Multiple template', + ].join(',') + } +} diff --git a/qa/smoke-test-rank-eval-with-mustache/build.gradle b/qa/smoke-test-rank-eval-with-mustache/build.gradle index 7274e65f4e1bd..122c2603719a0 100644 --- a/qa/smoke-test-rank-eval-with-mustache/build.gradle +++ b/qa/smoke-test-rank-eval-with-mustache/build.gradle @@ -26,3 +26,11 @@ dependencies { testCompile project(path: ':modules:lang-mustache', configuration: 'runtime') } +/* + * One of the integration tests doesn't work with the zip distribution + * and will be fixed later. + * Tracked by https://github.com/elastic/elasticsearch/issues/30628 + */ +if ("zip".equals(integTestCluster.distribution)) { + integTestRunner.enabled = false +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml index a88b37ead3154..f94cf286fd898 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/10_basic.yml @@ -1,8 +1,8 @@ --- "Shrink index via API": - skip: - version: " - 6.99.99" - reason: expects warnings that pre-7.0.0 will not send + version: " - 6.3.99" + reason: expects warnings that pre-6.4.0 will not send features: "warnings" # creates an index with one document solely allocated on the master node # and shrinks it into a new index with a single shard diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml index ee7b2215d2187..6f532ff81c688 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/20_source_mapping.yml @@ -1,8 +1,8 @@ --- "Shrink index ignores target template mapping": - skip: - version: " - 6.99.99" - reason: expects warnings that pre-7.0.0 will not send + version: " - 6.3.99" + reason: expects warnings that pre-6.4.0 will not send features: "warnings" - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml index 50438384b3ab0..53a12aad787f7 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/30_copy_settings.yml @@ -1,8 +1,8 @@ --- "Copy settings during shrink index": - 
skip: - version: " - 6.99.99" - reason: expects warnings that pre-7.0.0 will not send + version: " - 6.3.99" + reason: expects warnings that pre-6.4.0 will not send features: "warnings" - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml index 635673c182f2f..8cfe77042dd3f 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml @@ -33,8 +33,8 @@ setup: --- "Split index via API": - skip: - version: " - 6.99.99" - reason: expects warnings that pre-7.0.0 will not send + version: " - 6.3.99" + reason: expects warnings that pre-6.4.0 will not send features: "warnings" # make it read-only @@ -107,11 +107,8 @@ setup: --- "Split from 1 to N": - skip: - # when re-enabling uncomment the below skips - version: "all" - reason: "AwaitsFix'ing, see https://github.com/elastic/elasticsearch/issues/30503" - # version: " - 6.99.99" - # reason: expects warnings that pre-7.0.0 will not send + version: " - 6.99.99" + reason: Automatic preparation for splitting was added in 7.0.0 features: "warnings" - do: indices.create: @@ -213,8 +210,8 @@ setup: --- "Create illegal split indices": - skip: - version: " - 6.99.99" - reason: expects warnings that pre-7.0.0 will not send + version: " - 6.3.99" + reason: expects warnings that pre-6.4.0 will not send features: "warnings" # try to do an illegal split with number_of_routing_shards set diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml index 433ac040dd1e4..88d3f3c610202 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml @@ -1,11 +1,8 @@ --- "Split index ignores target template mapping": - skip: - # when re-enabling uncomment the below skips - version: "all" - reason: "AwaitsFix'ing, see https://github.com/elastic/elasticsearch/issues/30503" - # version: " - 6.99.99" - # reason: expects warnings that pre-7.0.0 will not send + version: " - 6.3.99" + reason: expects warnings that pre-6.4.0 will not send features: "warnings" # create index diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml index e0ace991f4f0d..9e64b2b8130ad 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/30_copy_settings.yml @@ -1,8 +1,8 @@ --- "Copy settings during split index": - skip: - version: " - 6.99.99" - reason: expects warnings that pre-7.0.0 will not send + version: " - 6.3.99" + reason: expects warnings that pre-6.4.0 will not send features: "warnings" - do: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml new file mode 100644 index 0000000000000..9dd54811fabaa --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/250_moving_fn.yml @@ -0,0 +1,46 @@ +setup: + - skip: + version: " - 6.4.0" + reason: "moving_fn added in 6.4.0" + +--- +"Bad 
window": + + - do: + catch: /\[window\] must be a positive, non-zero integer\./ + search: + body: + size: 0 + aggs: + the_histo: + date_histogram: + field: "date" + interval: "1d" + aggs: + the_avg: + avg: + field: "value_field" + the_mov_fn: + moving_fn: + buckets_path: "the_avg" + window: -1 + script: "MovingFunctions.windowMax(values)" + +--- +"Not under date_histo": + + - do: + catch: /\[window\] must be a positive, non-zero integer\./ + search: + body: + size: 0 + aggs: + the_avg: + avg: + field: "value_field" + the_mov_fn: + moving_fn: + buckets_path: "the_avg" + window: -1 + script: "MovingFunctions.windowMax(values)" + diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml index 841d5cf611bab..19593decb6533 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/80_typed_keys.yml @@ -195,7 +195,13 @@ setup: --- "Test typed keys parameter for date_histogram aggregation and max_bucket pipeline aggregation": + - skip: + features: warnings + version: " - 6.4.0" + reason: "deprecation added in 6.4.0" - do: + warnings: + - 'The moving_avg aggregation has been deprecated in favor of the moving_fn aggregation.' search: typed_keys: true body: diff --git a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java index 0427685b8ef4f..dff14bc8b393b 100644 --- a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java +++ b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java @@ -242,6 +242,35 @@ public static boolean reThrowIfNotNull(@Nullable Throwable e) { return true; } + /** + * If the specified cause is an unrecoverable error, this method will rethrow the cause on a separate thread so that it can not be + * caught and bubbles up to the uncaught exception handler. + * + * @param throwable the throwable to test + */ + public static void dieOnError(Throwable throwable) { + final Optional maybeError = ExceptionsHelper.maybeError(throwable, logger); + if (maybeError.isPresent()) { + /* + * Here be dragons. We want to rethrow this so that it bubbles up to the uncaught exception handler. Yet, Netty wraps too many + * invocations of user-code in try/catch blocks that swallow all throwables. This means that a rethrow here will not bubble up + * to where we want it to. So, we fork a thread and throw the exception from there where Netty can not get to it. We do not wrap + * the exception so as to not lose the original cause during exit. + */ + try { + // try to log the current stack trace + final String formatted = ExceptionsHelper.formatStackTrace(Thread.currentThread().getStackTrace()); + logger.error("fatal error\n{}", formatted); + } finally { + new Thread( + () -> { + throw maybeError.get(); + }) + .start(); + } + } + } + /** * Deduplicate the failures by exception message and index. 
*/ diff --git a/server/src/main/java/org/elasticsearch/action/ActionModule.java b/server/src/main/java/org/elasticsearch/action/ActionModule.java index 42ff432240381..fa4d751a54aed 100644 --- a/server/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/server/src/main/java/org/elasticsearch/action/ActionModule.java @@ -253,7 +253,6 @@ import org.elasticsearch.rest.action.admin.indices.RestGetAliasesAction; import org.elasticsearch.rest.action.admin.indices.RestGetAllAliasesAction; import org.elasticsearch.rest.action.admin.indices.RestGetAllMappingsAction; -import org.elasticsearch.rest.action.admin.indices.RestGetAllSettingsAction; import org.elasticsearch.rest.action.admin.indices.RestGetFieldMappingAction; import org.elasticsearch.rest.action.admin.indices.RestGetIndexTemplateAction; import org.elasticsearch.rest.action.admin.indices.RestGetIndicesAction; @@ -558,7 +557,6 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestGetAllAliasesAction(settings, restController)); registerHandler.accept(new RestGetAllMappingsAction(settings, restController)); - registerHandler.accept(new RestGetAllSettingsAction(settings, restController, indexScopedSettings, settingsFilter)); registerHandler.accept(new RestGetIndicesAction(settings, restController, indexScopedSettings, settingsFilter)); registerHandler.accept(new RestIndicesStatsAction(settings, restController)); registerHandler.accept(new RestIndicesSegmentsAction(settings, restController)); diff --git a/server/src/main/java/org/elasticsearch/action/TaskOperationFailure.java b/server/src/main/java/org/elasticsearch/action/TaskOperationFailure.java index 885647441d01f..8740c446b068e 100644 --- a/server/src/main/java/org/elasticsearch/action/TaskOperationFailure.java +++ b/server/src/main/java/org/elasticsearch/action/TaskOperationFailure.java @@ -21,17 +21,20 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.common.xcontent.ToXContent.Params; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.rest.RestStatus; import java.io.IOException; import static org.elasticsearch.ExceptionsHelper.detailedMessage; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; /** * Information about task operation failures @@ -39,7 +42,10 @@ * The class is final due to serialization limitations */ public final class TaskOperationFailure implements Writeable, ToXContentFragment { - + private static final String TASK_ID = "task_id"; + private static final String NODE_ID = "node_id"; + private static final String STATUS = "status"; + private static final String REASON = "reason"; private final String nodeId; private final long taskId; @@ -48,6 +54,21 @@ public final class TaskOperationFailure implements Writeable, ToXContentFragment private final RestStatus status; + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("task_info", true, constructorObjects -> { + int i = 0; + String nodeId = (String) constructorObjects[i++]; + long taskId = (long) 
constructorObjects[i++]; + ElasticsearchException reason = (ElasticsearchException) constructorObjects[i]; + return new TaskOperationFailure(nodeId, taskId, reason); + }); + + static { + PARSER.declareString(constructorArg(), new ParseField(NODE_ID)); + PARSER.declareLong(constructorArg(), new ParseField(TASK_ID)); + PARSER.declareObject(constructorArg(), (parser, c) -> ElasticsearchException.fromXContent(parser), new ParseField(REASON)); + } + public TaskOperationFailure(String nodeId, long taskId, Exception e) { this.nodeId = nodeId; this.taskId = taskId; @@ -98,13 +119,17 @@ public String toString() { return "[" + nodeId + "][" + taskId + "] failed, reason [" + getReason() + "]"; } + public static TaskOperationFailure fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field("task_id", getTaskId()); - builder.field("node_id", getNodeId()); - builder.field("status", status.name()); + builder.field(TASK_ID, getTaskId()); + builder.field(NODE_ID, getNodeId()); + builder.field(STATUS, status.name()); if (reason != null) { - builder.field("reason"); + builder.field(REASON); builder.startObject(); ElasticsearchException.generateThrowableXContent(builder, params, reason); builder.endObject(); @@ -112,5 +137,4 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java index 88d8ff4679917..1233b7143ab77 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java @@ -19,16 +19,19 @@ package org.elasticsearch.action.admin.cluster.node.tasks.list; -import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.support.tasks.BaseTasksResponse; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskInfo; @@ -40,10 +43,16 @@ import java.util.Map; import java.util.stream.Collectors; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + /** * Returns the list of tasks currently running on the nodes */ public class ListTasksResponse extends BaseTasksResponse implements ToXContentObject { + private static final String TASKS = "tasks"; + private static final String TASK_FAILURES = "task_failures"; + private static final String NODE_FAILURES = "node_failures"; private List tasks; @@ -56,11 +65,31 @@ public ListTasksResponse() { } public 
ListTasksResponse(List tasks, List taskFailures, - List nodeFailures) { + List nodeFailures) { super(taskFailures, nodeFailures); this.tasks = tasks == null ? Collections.emptyList() : Collections.unmodifiableList(new ArrayList<>(tasks)); } + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("list_tasks_response", true, + constructingObjects -> { + int i = 0; + @SuppressWarnings("unchecked") + List tasks = (List) constructingObjects[i++]; + @SuppressWarnings("unchecked") + List tasksFailures = (List) constructingObjects[i++]; + @SuppressWarnings("unchecked") + List nodeFailures = (List) constructingObjects[i]; + return new ListTasksResponse(tasks, tasksFailures, nodeFailures); + }); + + static { + PARSER.declareObjectArray(constructorArg(), TaskInfo.PARSER, new ParseField(TASKS)); + PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> TaskOperationFailure.fromXContent(p), new ParseField(TASK_FAILURES)); + PARSER.declareObjectArray(optionalConstructorArg(), + (parser, c) -> ElasticsearchException.fromXContent(parser), new ParseField(NODE_FAILURES)); + } + @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); @@ -159,7 +188,7 @@ public XContentBuilder toXContentGroupedByNode(XContentBuilder builder, Params p builder.endObject(); } } - builder.startObject("tasks"); + builder.startObject(TASKS); for(TaskInfo task : entry.getValue()) { builder.startObject(task.getTaskId().toString()); task.toXContent(builder, params); @@ -177,7 +206,7 @@ public XContentBuilder toXContentGroupedByNode(XContentBuilder builder, Params p */ public XContentBuilder toXContentGroupedByParents(XContentBuilder builder, Params params) throws IOException { toXContentCommon(builder, params); - builder.startObject("tasks"); + builder.startObject(TASKS); for (TaskGroup group : getTaskGroups()) { builder.field(group.getTaskInfo().getTaskId().toString()); group.toXContent(builder, params); @@ -191,7 +220,7 @@ public XContentBuilder toXContentGroupedByParents(XContentBuilder builder, Param */ public XContentBuilder toXContentGroupedByNone(XContentBuilder builder, Params params) throws IOException { toXContentCommon(builder, params); - builder.startArray("tasks"); + builder.startArray(TASKS); for (TaskInfo taskInfo : getTasks()) { builder.startObject(); taskInfo.toXContent(builder, params); @@ -204,14 +233,14 @@ public XContentBuilder toXContentGroupedByNone(XContentBuilder builder, Params p @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - toXContentGroupedByParents(builder, params); + toXContentGroupedByNone(builder, params); builder.endObject(); return builder; } private void toXContentCommon(XContentBuilder builder, Params params) throws IOException { if (getTaskFailures() != null && getTaskFailures().size() > 0) { - builder.startArray("task_failures"); + builder.startArray(TASK_FAILURES); for (TaskOperationFailure ex : getTaskFailures()){ builder.startObject(); builder.value(ex); @@ -221,8 +250,8 @@ private void toXContentCommon(XContentBuilder builder, Params params) throws IOE } if (getNodeFailures() != null && getNodeFailures().size() > 0) { - builder.startArray("node_failures"); - for (FailedNodeException ex : getNodeFailures()) { + builder.startArray(NODE_FAILURES); + for (ElasticsearchException ex : getNodeFailures()) { builder.startObject(); ex.toXContent(builder, params); builder.endObject(); @@ -231,6 +260,10 @@ private void toXContentCommon(XContentBuilder 
builder, Params params) throws IOE } } + public static ListTasksResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + @Override public String toString() { return Strings.toString(this); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java index ad81302918eb3..82f0e38572e77 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; @@ -44,7 +45,7 @@ * Registers a repository with given name, type and settings. If the repository with the same name already * exists in the cluster, the new repository will replace the existing repository. */ -public class PutRepositoryRequest extends AcknowledgedRequest { +public class PutRepositoryRequest extends AcknowledgedRequest implements ToXContentObject { private String name; @@ -232,4 +233,19 @@ public void writeTo(StreamOutput out) throws IOException { writeSettingsToStream(settings, out); out.writeBoolean(verify); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("name", name); + builder.field("type", type); + + builder.startObject("settings"); + settings.toXContent(builder, params); + builder.endObject(); + + builder.field("verify", verify); + builder.endObject(); + return builder; + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryResponse.java index c2b45743447f2..e58a1d9d147f9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryResponse.java @@ -22,6 +22,8 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -30,6 +32,13 @@ */ public class PutRepositoryResponse extends AcknowledgedResponse { + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("put_repository", + true, args -> new PutRepositoryResponse((boolean) args[0])); + + static { + declareAcknowledgedField(PARSER); + } + PutRepositoryResponse() { } @@ -49,4 +58,7 @@ public void writeTo(StreamOutput out) throws IOException { writeAcknowledged(out); } + public static PutRepositoryResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java index 52fe03f58c28d..a7a5548552be2 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java @@ -71,8 +71,9 @@ protected ClusterBlockException checkBlock(CreateSnapshotRequest request, Cluste @Override protected void masterOperation(final CreateSnapshotRequest request, ClusterState state, final ActionListener listener) { + final String snapshotName = indexNameExpressionResolver.resolveDateMathExpression(request.snapshot()); SnapshotsService.SnapshotRequest snapshotRequest = - new SnapshotsService.SnapshotRequest(request.repository(), request.snapshot(), "create_snapshot [" + request.snapshot() + "]") + new SnapshotsService.SnapshotRequest(request.repository(), snapshotName, "create_snapshot [" + snapshotName + "]") .indices(request.indices()) .indicesOptions(request.indicesOptions()) .partial(request.partial()) @@ -87,7 +88,7 @@ public void onResponse() { @Override public void onSnapshotCompletion(Snapshot snapshot, SnapshotInfo snapshotInfo) { if (snapshot.getRepository().equals(request.repository()) && - snapshot.getSnapshotId().getName().equals(request.snapshot())) { + snapshot.getSnapshotId().getName().equals(snapshotName)) { listener.onResponse(new CreateSnapshotResponse(snapshotInfo)); snapshotsService.removeListener(this); } @@ -96,7 +97,7 @@ public void onSnapshotCompletion(Snapshot snapshot, SnapshotInfo snapshotInfo) { @Override public void onSnapshotFailure(Snapshot snapshot, Exception e) { if (snapshot.getRepository().equals(request.repository()) && - snapshot.getSnapshotId().getName().equals(request.snapshot())) { + snapshot.getSnapshotId().getName().equals(snapshotName)) { listener.onFailure(e); snapshotsService.removeListener(this); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java index e510c0719df2d..ca046c48accff 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java @@ -101,8 +101,6 @@ public void readFrom(StreamInput in) throws IOException { } if (in.getVersion().before(Version.V_6_4_0)) { copySettings = null; - } else if (in.getVersion().onOrAfter(Version.V_6_4_0) && in.getVersion().before(Version.V_7_0_0_alpha1)){ - copySettings = in.readBoolean(); } else { copySettings = in.readOptionalBoolean(); } @@ -116,10 +114,9 @@ public void writeTo(StreamOutput out) throws IOException { if (out.getVersion().onOrAfter(ResizeAction.COMPATIBILITY_VERSION)) { out.writeEnum(type); } + // noinspection StatementWithEmptyBody if (out.getVersion().before(Version.V_6_4_0)) { - } else if (out.getVersion().onOrAfter(Version.V_6_4_0) && out.getVersion().before(Version.V_7_0_0_alpha1)) { - out.writeBoolean(copySettings == null ? 
        } else {
            out.writeOptionalBoolean(copySettings);
        }
diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java
index b018e24a565b8..5d4e558dbb25b 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java
@@ -37,6 +37,7 @@
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContent;
@@ -45,6 +46,7 @@
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.common.xcontent.support.XContentMapValues;

import java.io.IOException;
@@ -543,9 +545,6 @@ public void writeTo(StreamOutput out) throws IOException {

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        if (customs.isEmpty() == false) {
-            throw new IllegalArgumentException("Custom data type is no longer supported in index template [" + customs + "]");
-        }
        builder.field("index_patterns", indexPatterns);
        builder.field("order", order);
        if (version != null) {
@@ -558,8 +557,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {

        builder.startObject("mappings");
        for (Map.Entry entry : mappings.entrySet()) {
-            Map mapping = XContentHelper.convertToMap(new BytesArray(entry.getValue()), false).v2();
-            builder.field(entry.getKey(), mapping);
+            builder.field(entry.getKey());
+            XContentParser parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY,
+                DeprecationHandler.THROW_UNSUPPORTED_OPERATION, entry.getValue());
+            builder.copyCurrentStructure(parser);
        }
        builder.endObject();

@@ -568,6 +569,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            alias.toXContent(builder, params);
        }
        builder.endObject();
+
+        for (Map.Entry entry : customs.entrySet()) {
+            builder.field(entry.getKey(), entry.getValue(), params);
+        }
+
        return builder;
    }
}
diff --git a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java
index 64c26d6b94aa5..b284ec87dd42c 100644
--- a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java
+++ b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java
@@ -19,12 +19,17 @@

package org.elasticsearch.action.support;

+import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.rest.RestRequest;

import java.io.IOException;
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.HashSet;
import java.util.Map;
+import java.util.Set;

import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringArrayValue;
@@ -35,41 +40,155 @@
 */
public class IndicesOptions {

-    private static final IndicesOptions[] VALUES;
+    public enum WildcardStates {
+        OPEN,
+        CLOSED;

-    private static final byte IGNORE_UNAVAILABLE = 1;
-    private static final byte ALLOW_NO_INDICES = 2;
-    private static final byte EXPAND_WILDCARDS_OPEN = 4;
-    private static final byte EXPAND_WILDCARDS_CLOSED = 8;
-    private static final byte FORBID_ALIASES_TO_MULTIPLE_INDICES = 16;
-    private static final byte FORBID_CLOSED_INDICES = 32;
-    private static final byte IGNORE_ALIASES = 64;
+        public static final EnumSet<WildcardStates> NONE = EnumSet.noneOf(WildcardStates.class);

-    private static final byte STRICT_EXPAND_OPEN = 6;
-    private static final byte LENIENT_EXPAND_OPEN = 7;
-    private static final byte STRICT_EXPAND_OPEN_CLOSED = 14;
-    private static final byte STRICT_EXPAND_OPEN_FORBID_CLOSED = 38;
-    private static final byte STRICT_SINGLE_INDEX_NO_EXPAND_FORBID_CLOSED = 48;
+        public static EnumSet<WildcardStates> parseParameter(Object value, EnumSet<WildcardStates> defaultStates) {
+            if (value == null) {
+                return defaultStates;
+            }

-    static {
-        short max = 1 << 7;
-        VALUES = new IndicesOptions[max];
-        for (short id = 0; id < max; id++) {
-            VALUES[id] = new IndicesOptions((byte)id);
+            Set<WildcardStates> states = new HashSet<>();
+            String[] wildcards = nodeStringArrayValue(value);
+            for (String wildcard : wildcards) {
+                if ("open".equals(wildcard)) {
+                    states.add(OPEN);
+                } else if ("closed".equals(wildcard)) {
+                    states.add(CLOSED);
+                } else if ("none".equals(wildcard)) {
+                    states.clear();
+                } else if ("all".equals(wildcard)) {
+                    states.add(OPEN);
+                    states.add(CLOSED);
+                } else {
+                    throw new IllegalArgumentException("No valid expand wildcard value [" + wildcard + "]");
+                }
+            }
+
+            return states.isEmpty() ? NONE : EnumSet.copyOf(states);
        }
    }

-    private final byte id;
+    public enum Option {
+        IGNORE_UNAVAILABLE,
+        IGNORE_ALIASES,
+        ALLOW_NO_INDICES,
+        FORBID_ALIASES_TO_MULTIPLE_INDICES,
+        FORBID_CLOSED_INDICES;
+
+        public static final EnumSet