From b94831563305d29f35a4bf73a0b9eb8cc0241549 Mon Sep 17 00:00:00 2001 From: "Daniel Doubrovkine (dB.)" Date: Wed, 5 May 2021 15:04:14 -0400 Subject: [PATCH] Converted all .asciidoc to .md. (#658) Signed-off-by: dblock --- .gitattributes | 1 - TESTING.asciidoc | 691 ------------------ TESTING.md | 461 ++++++++++++ Vagrantfile | 2 +- .../security/delegate_pki/README.asciidoc | 35 - .../client/security/delegate_pki/README.md | 24 + .../resources/certs/pem-utils/README.asciidoc | 175 ----- .../test/resources/certs/pem-utils/README.md | 125 ++++ qa/os/README.md | 2 +- rest-api-spec/{README.markdown => README.md} | 0 .../rest-api-spec/test/README.asciidoc | 482 ------------ .../resources/rest-api-spec/test/README.md | 400 ++++++++++ 12 files changed, 1012 insertions(+), 1386 deletions(-) delete mode 100644 TESTING.asciidoc create mode 100644 TESTING.md delete mode 100644 client/rest-high-level/src/test/resources/org/opensearch/client/security/delegate_pki/README.asciidoc create mode 100644 client/rest-high-level/src/test/resources/org/opensearch/client/security/delegate_pki/README.md delete mode 100644 libs/ssl-config/src/test/resources/certs/pem-utils/README.asciidoc create mode 100644 libs/ssl-config/src/test/resources/certs/pem-utils/README.md rename rest-api-spec/{README.markdown => README.md} (100%) delete mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/README.md diff --git a/.gitattributes b/.gitattributes index 0c4d2c9b20b63..e69de29bb2d1d 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1 +0,0 @@ -CHANGELOG.asciidoc merge=union diff --git a/TESTING.asciidoc b/TESTING.asciidoc deleted file mode 100644 index d50efa0b5f686..0000000000000 --- a/TESTING.asciidoc +++ /dev/null @@ -1,691 +0,0 @@ -[[TestingFrameworkCheatsheet]] -= Testing - -[partintro] - -OpenSearch uses jUnit for testing, it also uses randomness in the -tests, that can be set using a seed, the following is a cheatsheet of -options for running the tests for OpenSearch. - -== Requirements - -You will need the following pieces of software to run these tests: -- Docker & Docker Compose -- Vagrant -- JDK 14 -- Gradle - - -== Creating packages - -To create a distribution without running the tests, simply run the -following: - ------------------------------ -./gradlew assemble ------------------------------ - -To create a platform-specific build, use the -following depending on your operating system: - ------------------------------ -./gradlew :distribution:archives:linux-tar:assemble -./gradlew :distribution:archives:darwin-tar:assemble -./gradlew :distribution:archives:windows-zip:assemble ------------------------------ - -=== Running OpenSearch from a checkout - -In order to run OpenSearch from source without building a package, you can -run it using Gradle: - -------------------------------------- -./gradlew run -------------------------------------- - -==== Launching and debugging from an IDE - -If you want to run OpenSearch from your IDE, the `./gradlew run` task -supports a remote debugging option: - ---------------------------------------------------------------------------- -./gradlew run --debug-jvm ---------------------------------------------------------------------------- - -This will instruct all JVMs (including any that run cli tools such as creating the keyring or adding users) -to suspend and initiate a debug connection on port incrementing from `5005`. 
-As such the IDE needs to be instructed to listen for connections on this port. -Since we might run multiple JVMs as part of configuring and starting the cluster it's -recommended to configure the IDE to initiate multiple listening attempts. In case of IntelliJ, this option -is called "Auto restart" and needs to be checked. In case of Eclipse, "Connection limit" setting -needs to be configured with a greater value (ie 10 or more). - -NOTE: If you have imported the project into IntelliJ according to the instructions in -link:/DEVELOPER_GUIDE.md#importing-the-project-into-intellij-idea[DEVELOPER_GUIDE.md] then a debug run configuration -named "Debug OpenSearch" will be created for you and configured appropriately. - -==== Other useful arguments - -- In order to start a node with a different max heap space add: `-Dtests.heap.size=4G` -- In order to disable assertions add: `-Dtests.asserts=false` -- In order to use a custom data directory: `--data-dir=/tmp/foo` -- In order to preserve data in between executions: `--preserve-data` -- In order to remotely attach a debugger to the process: `--debug-jvm` -- In order to set a different keystore password: `--keystore-password yourpassword` -- In order to set an OpenSearch setting, provide a setting with the following prefix: `-Dtests.opensearch.` - -=== Test case filtering. - -- `tests.class` is a class-filtering shell-like glob pattern, -- `tests.method` is a method-filtering glob pattern. - -Run a single test case (variants) - ----------------------------------------------------------- -./gradlew test -Dtests.class=org.opensearch.package.ClassName -./gradlew test "-Dtests.class=*.ClassName" ----------------------------------------------------------- - -Run all tests in a package and its sub-packages - ----------------------------------------------------- -./gradlew test "-Dtests.class=org.opensearch.package.*" ----------------------------------------------------- - -Run any test methods that contain 'esi' (like: ...r*esi*ze...) - -------------------------------- -./gradlew test "-Dtests.method=*esi*" -------------------------------- - -Run all tests that are waiting for a bugfix (disabled by default) - ------------------------------------------------- -./gradlew test -Dtests.filter=@awaitsfix ------------------------------------------------- - -=== Seed and repetitions. - -Run with a given seed (seed is a hex-encoded long). - ------------------------------- -./gradlew test -Dtests.seed=DEADBEEF ------------------------------- - -=== Repeats _all_ tests of ClassName N times. - -Every test repetition will have a different method seed -(derived from a single random master seed). - --------------------------------------------------- -./gradlew test -Dtests.iters=N -Dtests.class=*.ClassName --------------------------------------------------- - -=== Repeats _all_ tests of ClassName N times. - -Every test repetition will have exactly the same master (0xdead) and -method-level (0xbeef) seed. - ------------------------------------------------------------------------- -./gradlew test -Dtests.iters=N -Dtests.class=*.ClassName -Dtests.seed=DEAD:BEEF ------------------------------------------------------------------------- - -=== Repeats a given test N times - -(note the filters - individual test repetitions are given suffixes, -ie: testFoo[0], testFoo[1], etc... so using testmethod or tests.method -ending in a glob is necessary to ensure iterations are run). 
- -------------------------------------------------------------------------- -./gradlew test -Dtests.iters=N -Dtests.class=*.ClassName -Dtests.method=mytest* -------------------------------------------------------------------------- - -Repeats N times but skips any tests after the first failure or M initial failures. - -------------------------------------------------------------- -./gradlew test -Dtests.iters=N -Dtests.failfast=true -Dtestcase=... -./gradlew test -Dtests.iters=N -Dtests.maxfailures=M -Dtestcase=... -------------------------------------------------------------- - -=== Test groups. - -Test groups can be enabled or disabled (true/false). - -Default value provided below in [brackets]. - ------------------------------------------------------------------- -./gradlew test -Dtests.awaitsfix=[false] - known issue (@AwaitsFix) ------------------------------------------------------------------- - -=== Load balancing and caches. - -By default the tests run on multiple processes using all the available cores on all -available CPUs. Not including hyper-threading. -If you want to explicitly specify the number of JVMs you can do so on the command -line: - ----------------------------- -./gradlew test -Dtests.jvms=8 ----------------------------- - -Or in `~/.gradle/gradle.properties`: - ----------------------------- -systemProp.tests.jvms=8 ----------------------------- - -Its difficult to pick the "right" number here. Hypercores don't count for CPU -intensive tests and you should leave some slack for JVM-interal threads like -the garbage collector. And you have to have enough RAM to handle each JVM. - -=== Test compatibility. - -It is possible to provide a version that allows to adapt the tests behaviour -to older features or bugs that have been changed or fixed in the meantime. - ------------------------------------------ -./gradlew test -Dtests.compatibility=1.0.0 ------------------------------------------ - - -=== Miscellaneous. - -Run all tests without stopping on errors (inspect log files). - ------------------------------------------ -./gradlew test -Dtests.haltonfailure=false ------------------------------------------ - -Run more verbose output (JVM parameters, etc.). - ----------------------- -./gradlew test -verbose ----------------------- - -Change the default suite timeout to 5 seconds for all -tests (note the exclamation mark). - ---------------------------------------- -./gradlew test -Dtests.timeoutSuite=5000! ... ---------------------------------------- - -Change the logging level of OpenSearch (not Gradle) - --------------------------------- -./gradlew test -Dtests.opensearch.logger.level=DEBUG --------------------------------- - -Print all the logging output from the test runs to the commandline -even if tests are passing. - ------------------------------- -./gradlew test -Dtests.output=true ------------------------------- - -Configure the heap size. - ------------------------------- -./gradlew test -Dtests.heap.size=512m ------------------------------- - -Pass arbitrary jvm arguments. 
- ------------------------------- -# specify heap dump path -./gradlew test -Dtests.jvm.argline="-XX:HeapDumpPath=/path/to/heapdumps" -# enable gc logging -./gradlew test -Dtests.jvm.argline="-verbose:gc" -# enable security debugging -./gradlew test -Dtests.jvm.argline="-Djava.security.debug=access,failure" ------------------------------- - -== Running verification tasks - -To run all verification tasks, including static checks, unit tests, and integration tests: - ---------------------------------------------------------------------------- -./gradlew check ---------------------------------------------------------------------------- - -Note that this will also run the unit tests and precommit tasks first. If you want to just -run the in memory cluster integration tests (because you are debugging them): - ---------------------------------------------------------------------------- -./gradlew internalClusterTest ---------------------------------------------------------------------------- - -If you want to just run the precommit checks: - ---------------------------------------------------------------------------- -./gradlew precommit ---------------------------------------------------------------------------- - -Some of these checks will require `docker-compose` installed for bringing up -test fixtures. If it's not present those checks will be skipped automatically. - -== Testing the REST layer - -The REST layer is tested through specific tests that are executed against -a cluster that is configured and initialized via Gradle. The tests -themselves can be written in either Java or with a YAML based DSL. - -YAML based REST tests should be preferred since these are shared between clients. The YAML based tests describe the -operations to be executed and the obtained results that need to be tested. - -The YAML tests support various operators defined in the link:/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc[rest-api-spec] and adhere to the link:/rest-api-spec/README.markdown[OpenSearch REST API JSON specification] -In order to run the YAML tests, the relevant API specification needs -to be on the test classpath. Any gradle project that has support for REST -tests will get the primary API on it's class path. However, to better support -Gradle incremental builds, it is recommended to explicitly declare which -parts of the API the tests depend upon. - -For example: ---------------------------------------------------------------------------- -restResources { - restApi { - includeCore '_common', 'indices', 'index', 'cluster', 'nodes', 'get', 'ingest' - } -} ---------------------------------------------------------------------------- - -The REST tests are run automatically when executing the "./gradlew check" command. 
To run only the -YAML REST tests use the following command (modules and plugins may also include YAML REST tests): - ---------------------------------------------------------------------------- -./gradlew :rest-api-spec:yamlRestTest ---------------------------------------------------------------------------- - -A specific test case can be run with the following command: - ---------------------------------------------------------------------------- -./gradlew ':rest-api-spec:yamlRestTest' \ - --tests "org.opensearch.test.rest.ClientYamlTestSuiteIT" \ - -Dtests.method="test {p0=cat.segments/10_basic/Help}" ---------------------------------------------------------------------------- - -The YAML REST tests support all the options provided by the randomized runner, plus the following: - -* `tests.rest.suite`: comma separated paths of the test suites to be run -(by default loaded from /rest-api-spec/test). It is possible to run only a subset -of the tests providing a sub-folder or even a single yaml file (the default -/rest-api-spec/test prefix is optional when files are loaded from classpath) -e.g. -Dtests.rest.suite=index,get,create/10_with_id -* `tests.rest.blacklist`: comma separated globs that identify tests that are -blacklisted and need to be skipped -e.g. -Dtests.rest.blacklist=index/*/Index document,get/10_basic/* - -Java REST tests can be run with the "javaRestTest" task. - -For example : ---------------------------------------------------------------------------- -./gradlew :modules:mapper-extras:javaRestTest ---------------------------------------------------------------------------- - -A specific test case can be run with the following syntax (fqn.test {params}): - ---------------------------------------------------------------------------- -./gradlew ':modules:mapper-extras:javaRestTest' \ - --tests "org.opensearch.index.mapper.TokenCountFieldMapperIntegrationIT.testSearchByTokenCount {storeCountedFields=true loadCountedFields=false}" ---------------------------------------------------------------------------- - -yamlRestTest's and javaRestTest's are easy to identify, since they are found in a -respective source directory. However, there are some more specialized REST tests -that use custom task names. These are usually found in "qa" projects commonly -use the "integTest" task. - -If in doubt about which command to use, simply run :check - -Note that the REST tests, like all the integration tests, can be run against an external -cluster by specifying the `tests.cluster` property, which if present needs to contain a -comma separated list of nodes to connect to (e.g. localhost:9300). A transport client will -be created based on that and used for all the before|after test operations, and to extract -the http addresses of the nodes so that REST requests can be sent to them. - -== Testing packaging - -The packaging tests use Vagrant virtual machines or cloud instances to verify -that installing and running OpenSearch distributions works correctly on -supported operating systems. These tests should really only be run on ephemeral -systems because they're destructive; that is, these tests install and remove -packages and freely modify system settings, so you will probably regret it if -you execute them on your development machine. - -When you run a packaging test, Gradle will set up the target VM and mount your -repository directory in the VM. Once this is done, a Gradle task will issue a -Vagrant command to run a *nested* Gradle task on the VM. 
This nested Gradle -runs the actual "destructive" test classes. - -. Install Virtual Box and Vagrant. -+ -. (Optional) Install https://github.com/fgrehm/vagrant-cachier[vagrant-cachier] to squeeze -a bit more performance out of the process (Note: as of 2021, vagrant-cachier is unmaintained): -+ --------------------------------------- -vagrant plugin install vagrant-cachier --------------------------------------- -+ -. You can run all of the OS packaging tests with `./gradlew packagingTest`. -This task includes our legacy `bats` tests. To run only the OS tests that are -written in Java, run `.gradlew distroTest`, will cause Gradle to build the tar, -zip, and deb packages and all the plugins. It will then run the tests on every -available system. This will take a very long time. -+ -Fortunately, the various systems under test have their own Gradle tasks under -`qa/os`. To find out what packaging combinations can be tested on a system, run -the `tasks` task. For example: -+ ----------------------------------- -./gradlew :qa:os:ubuntu-1804:tasks ----------------------------------- -+ -If you want a quick test of the tarball and RPM packagings for Centos 7, you -would run: -+ -------------------------------------------------------------------------------------------------- -./gradlew :qa:os:centos-7:distroTest.rpm :qa:os:centos-7:distroTest.linux-archive -------------------------------------------------------------------------------------------------- - -Note that if you interrupt Gradle in the middle of running these tasks, any boxes started -will remain running and you'll have to stop them manually with `./gradlew --stop` or -`vagrant halt`. - -All the regular vagrant commands should just work so you can get a shell in a -VM running trusty by running -`vagrant up ubuntu-1604 --provider virtualbox && vagrant ssh ubuntu-1604`. - -These are the linux flavors supported, all of which we provide images for - -* ubuntu-1604 aka xenial -* ubuntu-1804 aka bionic beaver -* debian-8 aka jessie -* debian-9 aka stretch, the current debian stable distribution -* centos-6 -* centos-7 -* rhel-8 -* fedora-28 -* fedora-29 -* oel-6 aka Oracle Enterprise Linux 6 -* oel-7 aka Oracle Enterprise Linux 7 -* sles-12 -* opensuse-42 aka Leap - -We're missing the following from the support matrix because there aren't high -quality boxes available in vagrant atlas: - -* sles-11 - -=== Testing packaging on Windows - -The packaging tests also support Windows Server 2012R2 and Windows Server 2016. -Unfortunately we're not able to provide boxes for them in open source use -because of licensing issues. Any Virtualbox image that has WinRM and Powershell -enabled for remote users should work. - -Testing on Windows requires the https://github.com/criteo/vagrant-winrm[vagrant-winrm] plugin. - ------------------------------------- -vagrant plugin install vagrant-winrm ------------------------------------- - -Specify the image IDs of the Windows boxes to gradle with the following project -properties. They can be set in `~/.gradle/gradle.properties` like - ------------------------------------- -vagrant.windows-2012r2.id=my-image-id -vagrant.windows-2016.id=another-image-id ------------------------------------- - -or passed on the command line like `-Pvagrant.windows-2012r2.id=my-image-id` -`-Pvagrant.windows-2016=another-image-id` - -These properties are required for Windows support in all gradle tasks that -handle packaging tests. Either or both may be specified. 
- -If you're running vagrant commands outside of gradle, specify the Windows boxes -with the environment variables - -* `VAGRANT_WINDOWS_2012R2_BOX` -* `VAGRANT_WINDOWS_2016_BOX` - -=== Testing VMs are disposable - -It's important to think of VMs like cattle. If they become lame you just shoot -them and let vagrant reprovision them. Say you've hosed your precise VM: - ----------------------------------------------------- -vagrant ssh ubuntu-1604 -c 'sudo rm -rf /bin'; echo oops ----------------------------------------------------- - -All you've got to do to get another one is - ----------------------------------------------- -vagrant destroy -f ubuntu-1604 && vagrant up ubuntu-1604 --provider virtualbox ----------------------------------------------- - -The whole process takes a minute and a half on a modern laptop, two and a half -without vagrant-cachier. - -Some vagrant commands will work on all VMs at once: - ------------------- -vagrant halt -vagrant destroy -f ------------------- - -`vagrant up` would normally start all the VMs but we've prevented that because -that'd consume a ton of ram. - -=== Iterating on packaging tests - -Because our packaging tests are capable of testing many combinations of OS -(e.g., Windows, Linux, etc.), package type (e.g., zip file, RPM, etc.) and so forth, it's -faster to develop against smaller subsets of the tests. For example, to run -tests for the default archive distribution on Fedora 28: - ------------------------------------------------------------ -./gradlew :qa:os:fedora-28:distroTest.linux-archive ------------------------------------------------------------ - -These test tasks can use the `--tests`, `--info`, and `--debug` parameters just like -non-OS tests can. For example: - ------------------------------------------------------------ -./gradlew :qa:os:fedora-28:distroTest.linux-archive \ - --tests "com.opensearch.packaging.test.ArchiveTests" ------------------------------------------------------------ - -== Testing backwards compatibility - -Backwards compatibility tests exist to test upgrading from each supported version -to the current version. To run them all use: - -------------------------------------------------- -./gradlew bwcTest -------------------------------------------------- - -A specific version can be tested as well. For example, to test bwc with -version 5.3.2 run: - -------------------------------------------------- -./gradlew v5.3.2#bwcTest -------------------------------------------------- - -Use -Dtest.class and -Dtests.method to run a specific bwcTest test. -For example to run a specific tests from the x-pack rolling upgrade from 7.7.0: -------------------------------------------------- -./gradlew :x-pack:qa:rolling-upgrade:v7.7.0#bwcTest \ - -Dtests.class=org.opensearch.upgrades.UpgradeClusterClientYamlTestSuiteIT \ - -Dtests.method="test {p0=*/40_ml_datafeed_crud/*}" -------------------------------------------------- - -Tests are ran for versions that are not yet released but with which the current version will be compatible with. -These are automatically checked out and built from source. -See link:./buildSrc/src/main/java/org/opensearch/gradle/VersionCollection.java[VersionCollection] -and link:./distribution/bwc/build.gradle[distribution/bwc/build.gradle] -for more information. - -When running `./gradlew check`, minimal bwc checks are also run against compatible versions that are not yet released. - -==== BWC Testing against a specific remote/branch - -Sometimes a backward compatibility change spans two versions. 
A common case is a new functionality -that needs a BWC bridge in an unreleased versioned of a release branch (for example, 5.x). -To test the changes, you can instruct Gradle to build the BWC version from a another remote/branch combination instead of -pulling the release branch from GitHub. You do so using the `bwc.remote` and `bwc.refspec.BRANCH` system properties: - -------------------------------------------------- -./gradlew check -Dbwc.remote=${remote} -Dbwc.refspec.5.x=index_req_bwc_5.x -------------------------------------------------- - -The branch needs to be available on the remote that the BWC makes of the -repository you run the tests from. Using the remote is a handy trick to make -sure that a branch is available and is up to date in the case of multiple runs. - -Example: - -Say you need to make a change to `master` and have a BWC layer in `5.x`. You -will need to: -. Create a branch called `index_req_change` off your remote `${remote}`. This -will contain your change. -. Create a branch called `index_req_bwc_5.x` off `5.x`. This will contain your bwc layer. -. Push both branches to your remote repository. -. Run the tests with `./gradlew check -Dbwc.remote=${remote} -Dbwc.refspec.5.x=index_req_bwc_5.x`. - -==== Skip fetching latest - -For some BWC testing scenarios, you want to use the local clone of the -repository without fetching latest. For these use cases, you can set the system -property `tests.bwc.git_fetch_latest` to `false` and the BWC builds will skip -fetching the latest from the remote. - -== How to write good tests? - -=== Base classes for test cases - -There are multiple base classes for tests: - -* **`OpenSearchTestCase`**: The base class of all tests. It is typically extended - directly by unit tests. -* **`OpenSearchSingleNodeTestCase`**: This test case sets up a cluster that has a - single node. -* **`OpenSearchIntegTestCase`**: An integration test case that creates a cluster that - might have multiple nodes. -* **`OpenSearchRestTestCase`**: An integration tests that interacts with an external - cluster via the REST API. This is used for Java based REST tests. -* **`OpenSearchClientYamlSuiteTestCase` **: A subclass of `OpenSearchRestTestCase` used to run - YAML based REST tests. - -=== Good practices - -==== What kind of tests should I write? - -Unit tests are the preferred way to test some functionality: most of the time -they are simpler to understand, more likely to reproduce, and unlikely to be -affected by changes that are unrelated to the piece of functionality that is -being tested. - -The reason why `OpenSearchSingleNodeTestCase` exists is that all our components used to -be very hard to set up in isolation, which had led us to having a number of -integration tests but close to no unit tests. `OpenSearchSingleNodeTestCase` is a -workaround for this issue which provides an easy way to spin up a node and get -access to components that are hard to instantiate like `IndicesService`. -Whenever practical, you should prefer unit tests. - -Many tests extend `OpenSearchIntegTestCase`, mostly because this is how most tests used -to work in the early days of Elasticsearch. However the complexity of these -tests tends to make them hard to debug. Whenever the functionality that is -being tested isn't intimately dependent on how OpenSearch behaves as a -cluster, it is recommended to write unit tests or REST tests instead. - -In short, most new functionality should come with unit tests, and optionally -REST tests to test integration. 
- -==== Refactor code to make it easier to test - -Unfortunately, a large part of our code base is still hard to unit test. -Sometimes because some classes have lots of dependencies that make them hard to -instantiate. Sometimes because API contracts make tests hard to write. Code -refactors that make functionality easier to unit test are encouraged. If this -sounds very abstract to you, you can have a look at -https://github.com/elastic/elasticsearch/pull/16610[this pull request] for -instance, which is a good example. It refactors `IndicesRequestCache` in such -a way that: - - it no longer depends on objects that are hard to instantiate such as - `IndexShard` or `SearchContext`, - - time-based eviction is applied on top of the cache rather than internally, - which makes it easier to assert on what the cache is expected to contain at - a given time. - -=== Bad practices - -==== Use randomized-testing for coverage - -In general, randomization should be used for parameters that are not expected -to affect the behavior of the functionality that is being tested. For instance -the number of shards should not impact `date_histogram` aggregations, and the -choice of the `store` type (`niofs` vs `mmapfs`) does not affect the results of -a query. Such randomization helps improve confidence that we are not relying on -implementation details of one component or specifics of some setup. - -However it should not be used for coverage. For instance if you are testing a -piece of functionality that enters different code paths depending on whether -the index has 1 shards or 2+ shards, then we shouldn't just test against an -index with a random number of shards: there should be one test for the 1-shard -case, and another test for the 2+ shards case. - -==== Abuse randomization in multi-threaded tests - -Multi-threaded tests are often not reproducible due to the fact that there is -no guarantee on the order in which operations occur across threads. Adding -randomization to the mix usually makes things worse and should be done with -care. - -== Test coverage analysis - -Generating test coverage reports for OpenSearch is currently not possible through Gradle. -However, it _is_ possible to gain insight in code coverage using IntelliJ's built-in coverage -analysis tool that can measure coverage upon executing specific tests. Eclipse may also be able -to do the same using the EclEmma plugin. - -Test coverage reporting used to be possible with JaCoCo when OpenSearch was using Maven -as its build system. Since the switch to Gradle though, this is no longer possible, seeing as -the code currently used to build OpenSearch does not allow JaCoCo to recognize its tests. -For more information on this, see the discussion in https://github.com/elastic/elasticsearch/issues/28867[issue #28867]. - ---------------------------------------------------------------------------- - -Read your IDE documentation for how to attach a debugger to a JVM process. - -== Building with extra plugins -Additional plugins may be built alongside OpenSearch, where their -dependency on OpenSearch will be substituted with the local OpenSearch -build. To add your plugin, create a directory called opensearch-extra as -a sibling of OpenSearch. Checkout your plugin underneath opensearch-extra -and the build will automatically pick it up. You can verify the plugin is -included as part of the build by checking the projects of the build. 
- ---------------------------------------------------------------------------- -./gradlew projects ---------------------------------------------------------------------------- - -== Environment misc - -There is a known issue with macOS localhost resolve strategy that can cause -some integration tests to fail. This is because integration tests have timings -for cluster formation, discovery, etc. that can be exceeded if name resolution -takes a long time. -To fix this, make sure you have your computer name (as returned by `hostname`) -inside `/etc/hosts`, e.g.: -.... -127.0.0.1 localhost OpenSearchMBP.local -255.255.255.255 broadcasthost -::1 localhost OpenSearchMBP.local` -.... - -== Benchmarking - -For changes that might affect the performance characteristics of OpenSearch -you should also run macrobenchmarks. There is also a macrobenchmarking tool -called https://github.com/elastic/rally[Rally] -which you can use to measure the performance impact. To get started, -please see https://esrally.readthedocs.io/en/stable/[Rally's documentation]. diff --git a/TESTING.md b/TESTING.md new file mode 100644 index 0000000000000..100f5783f3a4b --- /dev/null +++ b/TESTING.md @@ -0,0 +1,461 @@ +OpenSearch uses [jUnit](https://junit.org/junit5/) for testing, it also uses randomness in the tests, that can be set using a seed, the following is a cheatsheet of options for running the tests for OpenSearch. + +- [Requirements](#requirements) +- [Creating packages](#creating-packages) + - [Running OpenSearch from a checkout](#running-opensearch-from-a-checkout) + - [Launching and debugging from an IDE](#launching-and-debugging-from-an-ide) + - [Other useful arguments](#other-useful-arguments) + - [Test case filtering](#test-case-filtering) + - [Seed and repetitions](#seed-and-repetitions) + - [Repeats *all* tests of ClassName N times](#repeats-all-tests-of-classname-n-times) + - [Repeats *all* tests of ClassName N times](#repeats-all-tests-of-classname-n-times-1) + - [Repeats a given test N times](#repeats-a-given-test-n-times) + - [Test groups](#test-groups) + - [Load balancing and caches](#load-balancing-and-caches) + - [Test compatibility](#test-compatibility) + - [Miscellaneous](#miscellaneous) +- [Running verification tasks](#running-verification-tasks) +- [Testing the REST layer](#testing-the-rest-layer) +- [Testing packaging](#testing-packaging) + - [Testing packaging on Windows](#testing-packaging-on-windows) + - [Testing VMs are disposable](#testing-vms-are-disposable) + - [Iterating on packaging tests](#iterating-on-packaging-tests) +- [Testing backwards compatibility](#testing-backwards-compatibility) + - [BWC Testing against a specific remote/branch](#bwc-testing-against-a-specific-remotebranch) + - [Skip fetching latest](#skip-fetching-latest) +- [How to write good tests?](#how-to-write-good-tests) + - [Base classes for test cases](#base-classes-for-test-cases) + - [Good practices](#good-practices) + - [What kind of tests should I write?](#what-kind-of-tests-should-i-write) + - [Refactor code to make it easier to test](#refactor-code-to-make-it-easier-to-test) + - [Bad practices](#bad-practices) + - [Use randomized-testing for coverage](#use-randomized-testing-for-coverage) + - [Abuse randomization in multi-threaded tests](#abuse-randomization-in-multi-threaded-tests) +- [Test coverage analysis](#test-coverage-analysis) +- [Building with extra plugins](#building-with-extra-plugins) +- [Environment misc](#environment-misc) +- [Benchmarking](#benchmarking) + +# Requirements + +You will need the 
following pieces of software to run these tests: + +- Docker & Docker Compose +- Vagrant +- JDK 14 +- Gradle + +# Creating packages + +To create a distribution without running the tests, run the following: + + ./gradlew assemble + +To create a platform-specific build, use the following depending on your operating system: + + ./gradlew :distribution:archives:linux-tar:assemble + ./gradlew :distribution:archives:darwin-tar:assemble + ./gradlew :distribution:archives:windows-zip:assemble + +## Running OpenSearch from a checkout + +In order to run OpenSearch from source without building a package, you can run it using Gradle: + + ./gradlew run + +### Launching and debugging from an IDE + +If you want to run OpenSearch from your IDE, the `./gradlew run` task supports a remote debugging option: + + ./gradlew run --debug-jvm + +This will instruct all JVMs (including any that run cli tools such as creating the keyring or adding users) to suspend and initiate a debug connection on port incrementing from `5005`. As such, the IDE needs to be instructed to listen for connections on this port. Since we might run multiple JVMs as part of configuring and starting the cluster, it's recommended to configure the IDE to initiate multiple listening attempts. In case of IntelliJ, this option is called "Auto restart" and needs to be checked. In case of Eclipse, "Connection limit" setting needs to be configured with a greater value (ie 10 or more). + +If you have imported the project into IntelliJ according to the instructions in [DEVELOPER_GUIDE.md](DEVELOPER_GUIDE.md#importing-the-project-into-intellij-idea), then a debug run configuration named "Debug OpenSearch" will be created +for you and configured appropriately. + +### Other useful arguments + +- In order to start a node with a different max heap space add: `-Dtests.heap.size=4G` +- In order to disable assertions add: `-Dtests.asserts=false` +- In order to use a custom data directory: `--data-dir=/tmp/foo` +- In order to preserve data in between executions: `--preserve-data` +- In order to remotely attach a debugger to the process: `--debug-jvm` +- In order to set a different keystore password: `--keystore-password yourpassword` +- In order to set an OpenSearch setting, provide a setting with the following prefix: `-Dtests.opensearch.` + +## Test case filtering + +- `tests.class` is a class-filtering shell-like glob pattern +- `tests.method` is a method-filtering glob pattern. + +Run a single test case (variants) + + ./gradlew test -Dtests.class=org.opensearch.package.ClassName + ./gradlew test "-Dtests.class=*.ClassName" + +Run all tests in a package and its sub-packages + + ./gradlew test "-Dtests.class=org.opensearch.package.*" + +Run any test methods that contain *esi* (e.g.: .r*esi*ze.) + + ./gradlew test "-Dtests.method=*esi*" + +Run all tests that are waiting for a bugfix (disabled by default) + + ./gradlew test -Dtests.filter=@awaitsfix + +## Seed and repetitions + +Run with a given seed (seed is a hex-encoded long). + + ./gradlew test -Dtests.seed=DEADBEEF + +## Repeats *all* tests of ClassName N times + +Every test repetition will have a different method seed (derived from a single random master seed). + + ./gradlew test -Dtests.iters=N -Dtests.class=*.ClassName + +## Repeats *all* tests of ClassName N times + +Every test repetition will have exactly the same master (0xdead) and method-level (0xbeef) seed. 
+ + ./gradlew test -Dtests.iters=N -Dtests.class=*.ClassName -Dtests.seed=DEAD:BEEF + +## Repeats a given test N times + +Note that individual test repetitions are passed suffixes, such as: `testFoo[0]`, `testFoo[1]`, etc. Thus using `testmethod` or `tests.method` ending in a glob is necessary to ensure iterations are run. + + ./gradlew test -Dtests.iters=N -Dtests.class=*.ClassName -Dtests.method=mytest* + +Repeats N times but skips any tests after the first failure or M initial failures. + + ./gradlew test -Dtests.iters=N -Dtests.failfast=true -Dtestcase=... + ./gradlew test -Dtests.iters=N -Dtests.maxfailures=M -Dtestcase=... + +## Test groups + +Test groups can be enabled or disabled (true/false). + +Default value provided below in \[brackets\]. + + ./gradlew test -Dtests.awaitsfix=[false] - known issue (@AwaitsFix) + +## Load balancing and caches + +By default the tests run on multiple processes using all the available cores on all available CPUs. Not including hyper-threading. If you want to explicitly specify the number of JVMs you can do so on the command line: + + ./gradlew test -Dtests.jvms=8 + +Or in `~/.gradle/gradle.properties`: + + systemProp.tests.jvms=8 + +Its difficult to pick the "right" number here. Hypercores don’t count for CPU intensive tests and you should leave some slack for JVM-interal threads like the garbage collector. And you have to have enough RAM to handle each JVM. + +## Test compatibility + +It is possible to provide a version that allows to adapt the tests behaviour to older features or bugs that have been changed or fixed in the meantime. + + ./gradlew test -Dtests.compatibility=1.0.0 + +## Miscellaneous + +Run all tests without stopping on errors (inspect log files). + + ./gradlew test -Dtests.haltonfailure=false + +Run more verbose output (JVM parameters, etc.). + + ./gradlew test -verbose + +Change the default suite timeout to 5 seconds for all tests (note the exclamation mark). + + ./gradlew test -Dtests.timeoutSuite=5000! ... + +Change the logging level of OpenSearch (not Gradle) + + ./gradlew test -Dtests.opensearch.logger.level=DEBUG + +Print all the logging output from the test runs to the commandline even if tests are passing. + + ./gradlew test -Dtests.output=true + +Configure the heap size. + + ./gradlew test -Dtests.heap.size=512m + +Pass arbitrary jvm arguments. + + # specify heap dump path + ./gradlew test -Dtests.jvm.argline="-XX:HeapDumpPath=/path/to/heapdumps" + # enable gc logging + ./gradlew test -Dtests.jvm.argline="-verbose:gc" + # enable security debugging + ./gradlew test -Dtests.jvm.argline="-Djava.security.debug=access,failure" + +# Running verification tasks + +To run all verification tasks, including static checks, unit tests, and integration tests: + + ./gradlew check + +Note that this will also run the unit tests and precommit tasks first. If you want to just run the in memory cluster integration tests (because you are debugging them): + + ./gradlew internalClusterTest + +If you want to just run the precommit checks: + + ./gradlew precommit + +Some of these checks will require `docker-compose` installed for bringing up test fixtures. If it’s not present those checks will be skipped automatically. + +# Testing the REST layer + +The REST layer is tested through specific tests that are executed against a cluster that is configured and initialized via Gradle. The tests themselves can be written in either Java or with a YAML based DSL. + +YAML based REST tests should be preferred since these are shared between clients. 
The YAML based tests describe the operations to be executed and the obtained results that need to be tested. + +The YAML tests support various operators defined in the [rest-api-spec](/rest-api-spec/src/main/resources/rest-api-spec/test/README.md) and adhere to the [OpenSearch REST API JSON specification](/rest-api-spec/README.md). In order to run the YAML tests, the relevant API specification needs to be on the test classpath. Any gradle project that has support for REST tests will get the primary API on it’s class path. However, to better support Gradle incremental builds, it is recommended to explicitly declare which parts of the API the tests depend upon. + +For example: + + restResources { + restApi { + includeCore '_common', 'indices', 'index', 'cluster', 'nodes', 'get', 'ingest' + } + } + +The REST tests are run automatically when executing the "./gradlew check" command. To run only the YAML REST tests use the following command (modules and plugins may also include YAML REST tests): + + ./gradlew :rest-api-spec:yamlRestTest + +A specific test case can be run with the following command: + + ./gradlew ':rest-api-spec:yamlRestTest' \ + --tests "org.opensearch.test.rest.ClientYamlTestSuiteIT" \ + -Dtests.method="test {p0=cat.segments/10_basic/Help}" + +The YAML REST tests support all the options provided by the randomized runner, plus the following: + +- `tests.rest.suite`: comma separated paths of the test suites to be run (by default loaded from /rest-api-spec/test). It is possible to run only a subset of the tests providing a sub-folder or even a single yaml file (the default /rest-api-spec/test prefix is optional when files are loaded from classpath) e.g. `-Dtests.rest.suite=index,get,create/10_with_id` + +- `tests.rest.blacklist`: comma separated globs that identify tests that are blacklisted and need to be skipped e.g. `-Dtests.rest.blacklist=index/**/Index document,get/10_basic/**` + +Java REST tests can be run with the "javaRestTest" task. + +For example : + + ./gradlew :modules:mapper-extras:javaRestTest + + ./gradlew ':modules:mapper-extras:javaRestTest' \ + --tests "org.opensearch.index.mapper.TokenCountFieldMapperIntegrationIT.testSearchByTokenCount {storeCountedFields=true loadCountedFields=false}" + +yamlRestTest’s and javaRestTest’s are easy to identify, since they are found in a respective source directory. However, there are some more specialized REST tests that use custom task names. These are usually found in "qa" projects commonly use the "integTest" task. + +If in doubt about which command to use, simply run <gradle path>:check + +Note that the REST tests, like all the integration tests, can be run against an external cluster by specifying the `tests.cluster` property, which if present needs to contain a comma separated list of nodes to connect to (e.g. localhost:9300). A transport client will be created based on that and used for all the before|after test operations, and to extract the http addresses of the nodes so that REST requests can be sent to them. + +# Testing packaging + +The packaging tests use Vagrant virtual machines or cloud instances to verify that installing and running OpenSearch distributions works correctly on supported operating systems. These tests should really only be run on ephemeral systems because they’re destructive; that is, these tests install and remove packages and freely modify system settings, so you will probably regret it if you execute them on your development machine. 
+ +When you run a packaging test, Gradle will set up the target VM and mount your repository directory in the VM. Once this is done, a Gradle task will issue a Vagrant command to run a **nested** Gradle task on the VM. This nested Gradle runs the actual "destructive" test classes. + +1. Install Virtual Box and Vagrant. + +2. (Optional) Install [vagrant-cachier](https://github.com/fgrehm/vagrant-cachier) to squeeze a bit more performance out of the process (Note: as of 2021, vagrant-cachier is unmaintained): + + vagrant plugin install vagrant-cachier + +3. You can run all of the OS packaging tests with `./gradlew packagingTest`. This task includes our legacy `bats` tests. + + To run only the OS tests that are written in Java, run `.gradlew distroTest`, will cause Gradle to build the tar, zip, and deb packages and all the plugins. It will then run the tests on every available system. This will take a very long time. + + Fortunately, the various systems under test have their own Gradle tasks under `qa/os`. To find out what packaging combinations can be tested on a system, run the `tasks` task. For example: + + ./gradlew :qa:os:ubuntu-1804:tasks + + If you want a quick test of the tarball and RPM packagings for Centos 7, you would run: + + ./gradlew :qa:os:centos-7:distroTest.rpm :qa:os:centos-7:distroTest.linux-archive + +Note that if you interrupt Gradle in the middle of running these tasks, any boxes started will remain running and you’ll have to stop them manually with `./gradlew --stop` or `vagrant halt`. + +All the regular vagrant commands should just work so you can get a shell in a VM running trusty by running `vagrant up ubuntu-1604 --provider virtualbox && vagrant ssh ubuntu-1604`. + +These are the linux flavors supported, all of which we provide images for + +- ubuntu-1604 aka xenial +- ubuntu-1804 aka bionic beaver +- debian-8 aka jessie +- debian-9 aka stretch, the current debian stable distribution +- centos-6 +- centos-7 +- rhel-8 +- fedora-28 +- fedora-29 +- oel-6 aka Oracle Enterprise Linux 6 +- oel-7 aka Oracle Enterprise Linux 7 +- sles-12 +- opensuse-42 aka Leap + +We’re missing the following from the support matrix because there aren’t high quality boxes available in vagrant atlas: + +- sles-11 + +## Testing packaging on Windows + +The packaging tests also support Windows Server 2012R2 and Windows Server 2016. Unfortunately we’re not able to provide boxes for them in open source use because of licensing issues. Any Virtualbox image that has WinRM and Powershell enabled for remote users should work. + +Testing on Windows requires the [vagrant-winrm](https://github.com/criteo/vagrant-winrm) plugin. + + vagrant plugin install vagrant-winrm + +Specify the image IDs of the Windows boxes to gradle with the following project properties. They can be set in `~/.gradle/gradle.properties` such as + + vagrant.windows-2012r2.id=my-image-id + vagrant.windows-2016.id=another-image-id + +or passed on the command line such as `-Pvagrant.windows-2012r2.id=my-image-id` or `-Pvagrant.windows-2016=another-image-id` + +These properties are required for Windows support in all gradle tasks that handle packaging tests. Either or both may be specified. + +If you’re running vagrant commands outside of gradle, specify the Windows boxes with the environment variables. + +- `VAGRANT_WINDOWS_2012R2_BOX` +- `VAGRANT_WINDOWS_2016_BOX` + +## Testing VMs are disposable + +It’s important to think of VMs like cattle. If they become lame you just shoot them and let vagrant reprovision them. 
Say you’ve hosed your precise VM:
+
+    vagrant ssh ubuntu-1604 -c 'sudo rm -rf /bin'; echo oops
+
+All you’ve got to do to get another one is
+
+    vagrant destroy -f ubuntu-1604 && vagrant up ubuntu-1604 --provider virtualbox
+
+The whole process takes a minute and a half on a modern laptop, two and a half without vagrant-cachier.
+
+Some vagrant commands will work on all VMs at once:
+
+    vagrant halt
+    vagrant destroy -f
+
+`vagrant up` would normally start all the VMs but we’ve prevented that because that’d consume a ton of RAM.
+
+## Iterating on packaging tests
+
+Because our packaging tests are capable of testing many combinations of OS (e.g., Windows, Linux, etc.), package type (e.g., zip file, RPM, etc.) and so forth, it’s faster to develop against smaller subsets of the tests. For example, to run tests for the default archive distribution on Fedora 28:
+
+    ./gradlew :qa:os:fedora-28:distroTest.linux-archive
+
+These test tasks can use the `--tests`, `--info`, and `--debug` parameters just like non-OS tests can. For example:
+
+    ./gradlew :qa:os:fedora-28:distroTest.linux-archive --tests "com.opensearch.packaging.test.ArchiveTests"
+
+# Testing backwards compatibility
+
+Backwards compatibility tests exist to test upgrading from each supported version to the current version. To run them all, use:
+
+    ./gradlew bwcTest
+
+A specific version can be tested as well. For example, to test bwc with version 5.3.2 run:
+
+    ./gradlew v5.3.2#bwcTest
+
+Use `-Dtests.class` and `-Dtests.method` to run a specific bwcTest test. For example, to run a specific test from the x-pack rolling upgrade from 7.7.0:
+
+    ./gradlew :x-pack:qa:rolling-upgrade:v7.7.0#bwcTest \
+    -Dtests.class=org.opensearch.upgrades.UpgradeClusterClientYamlTestSuiteIT \
+    -Dtests.method="test {p0=*/40_ml_datafeed_crud/*}"
+
+Tests are run for versions that are not yet released but with which the current version will be compatible. These are automatically checked out and built from source. See [VersionCollection](./buildSrc/src/main/java/org/opensearch/gradle/VersionCollection.java) and [distribution/bwc/build.gradle](./distribution/bwc/build.gradle) for more information.
+
+When running `./gradlew check`, minimal bwc checks are also run against compatible versions that are not yet released.
+
+## BWC Testing against a specific remote/branch
+
+Sometimes a backward compatibility change spans two versions. A common case is new functionality that needs a BWC bridge in an unreleased version of a release branch (for example, 5.x). To test the changes, you can instruct Gradle to build the BWC version from another remote/branch combination instead of pulling the release branch from GitHub. You do so using the `bwc.remote` and `bwc.refspec.BRANCH` system properties:
+
+    ./gradlew check -Dbwc.remote=${remote} -Dbwc.refspec.5.x=index_req_bwc_5.x
+
+The branch needs to be available on the remote that the BWC build uses to clone the repository you run the tests from. Using the remote is a handy trick to make sure that a branch is available and is up to date in the case of multiple runs.
+
+Example:
+
+Say you need to make a change to `master` and have a BWC layer in `5.x`. You will need to:
+
+1. Create a branch called `index_req_change` off your remote `${remote}`. This will contain your change.
+2. Create a branch called `index_req_bwc_5.x` off `5.x`. This will contain your bwc layer.
+3. Push both branches to your remote repository.
+4. Run the tests with `./gradlew check -Dbwc.remote=${remote} -Dbwc.refspec.5.x=index_req_bwc_5.x`.
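+
+The steps above might map to git commands along these lines. This is only a sketch: it assumes your fork is registered as the remote `${remote}` and that its `master` and `5.x` branches are the ones you want to base your work on; adjust the names to your setup.
+
+    # make sure the remote-tracking branches are up to date (assumes remote ${remote} exists)
+    git fetch ${remote}
+    # branch carrying your change, based on master
+    git checkout -b index_req_change ${remote}/master
+    # branch carrying the BWC layer, based on 5.x
+    git checkout -b index_req_bwc_5.x ${remote}/5.x
+    # push both branches so the BWC build can fetch them
+    git push ${remote} index_req_change index_req_bwc_5.x
+    # run the BWC tests against the custom remote/branch
+    ./gradlew check -Dbwc.remote=${remote} -Dbwc.refspec.5.x=index_req_bwc_5.x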
+ +### Skip fetching latest + +For some BWC testing scenarios, you want to use the local clone of the repository without fetching latest. For these use cases, you can set the system property `tests.bwc.git_fetch_latest` to `false` and the BWC builds will skip fetching the latest from the remote. + +# How to write good tests? + +## Base classes for test cases + +There are multiple base classes for tests: + +- **`OpenSearchTestCase`**: The base class of all tests. It is typically extended directly by unit tests. +- **`OpenSearchSingleNodeTestCase`**: This test case sets up a cluster that has a single node. +- **`OpenSearchIntegTestCase`**: An integration test case that creates a cluster that might have multiple nodes. +- **`OpenSearchRestTestCase`**: An integration tests that interacts with an external cluster via the REST API. This is used for Java based REST tests. +- **`OpenSearchClientYamlSuiteTestCase`** : A subclass of `OpenSearchRestTestCase` used to run YAML based REST tests. + +## Good practices + +### What kind of tests should I write? + +Unit tests are the preferred way to test some functionality: most of the time they are simpler to understand, more likely to reproduce, and unlikely to be affected by changes that are unrelated to the piece of functionality that is being tested. + +The reason why `OpenSearchSingleNodeTestCase` exists is that all our components used to be very hard to set up in isolation, which had led us to having a number of integration tests but close to no unit tests. `OpenSearchSingleNodeTestCase` is a workaround for this issue which provides an easy way to spin up a node and get access to components that are hard to instantiate like `IndicesService`. Whenever practical, you should prefer unit tests. + +Many tests extend `OpenSearchIntegTestCase`, mostly because this is how most tests used to work in the early days of Elasticsearch. However the complexity of these tests tends to make them hard to debug. Whenever the functionality that is being tested isn’t intimately dependent on how OpenSearch behaves as a cluster, it is recommended to write unit tests or REST tests instead. + +In short, most new functionality should come with unit tests, and optionally REST tests to test integration. + +### Refactor code to make it easier to test + +Unfortunately, a large part of our code base is still hard to unit test. Sometimes because some classes have lots of dependencies that make them hard to instantiate. Sometimes because API contracts make tests hard to write. Code refactors that make functionality easier to unit test are encouraged. If this sounds very abstract to you, you can have a look at [this pull request](https://github.com/elastic/elasticsearch/pull/16610) for instance, which is a good example. It refactors `IndicesRequestCache` in such a way that: - it no longer depends on objects that are hard to instantiate such as `IndexShard` or `SearchContext`, - time-based eviction is applied on top of the cache rather than internally, which makes it easier to assert on what the cache is expected to contain at a given time. + +## Bad practices + +### Use randomized-testing for coverage + +In general, randomization should be used for parameters that are not expected to affect the behavior of the functionality that is being tested. For instance the number of shards should not impact `date_histogram` aggregations, and the choice of the `store` type (`niofs` vs `mmapfs`) does not affect the results of a query. 
Such randomization helps improve confidence that we are not relying on implementation details of one component or specifics of some setup. + +However it should not be used for coverage. For instance if you are testing a piece of functionality that enters different code paths depending on whether the index has 1 shards or 2+ shards, then we shouldn’t just test against an index with a random number of shards: there should be one test for the 1-shard case, and another test for the 2+ shards case. + +### Abuse randomization in multi-threaded tests + +Multi-threaded tests are often not reproducible due to the fact that there is no guarantee on the order in which operations occur across threads. Adding randomization to the mix usually makes things worse and should be done with care. + +# Test coverage analysis + +Generating test coverage reports for OpenSearch is currently not possible through Gradle. However, it *is* possible to gain insight in code coverage using IntelliJ’s built-in coverage analysis tool that can measure coverage upon executing specific tests. Eclipse may also be able to do the same using the EclEmma plugin. + +Test coverage reporting used to be possible with JaCoCo when OpenSearch was using Maven as its build system. Since the switch to Gradle though, this is no longer possible, seeing as the code currently used to build OpenSearch does not allow JaCoCo to recognize its tests. For more information on this, see the discussion in [issue #28867](https://github.com/elastic/elasticsearch/issues/28867). + +Read your IDE documentation for how to attach a debugger to a JVM process. + +# Building with extra plugins + +Additional plugins may be built alongside OpenSearch, where their dependency on OpenSearch will be substituted with the local OpenSearch build. To add your plugin, create a directory called opensearch-extra as a sibling of OpenSearch. Checkout your plugin underneath opensearch-extra and the build will automatically pick it up. You can verify the plugin is included as part of the build by checking the projects of the build. + + ./gradlew projects + +# Environment misc + +There is a known issue with macOS localhost resolve strategy that can cause some integration tests to fail. This is because integration tests have timings for cluster formation, discovery, etc. that can be exceeded if name resolution takes a long time. To fix this, make sure you have your computer name (as returned by `hostname`) inside `/etc/hosts`, e.g.: + + 127.0.0.1 localhost OpenSearchMBP.local + 255.255.255.255 broadcasthost + ::1 localhost OpenSearchMBP.local` + +# Benchmarking + +For changes that might affect the performance characteristics of OpenSearch you should also run macrobenchmarks. There is also a macrobenchmarking tool called [Rally](https://github.com/elastic/rally) which you can use to measure the performance impact. To get started, please see [Rally’s documentation](https://esrally.readthedocs.io/en/stable/). \ No newline at end of file diff --git a/Vagrantfile b/Vagrantfile index a7955246088a9..39cc4f9882c52 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -10,7 +10,7 @@ # vim: ft=ruby ts=2 sw=2 sts=2 et: # This Vagrantfile exists to test packaging. Read more about its use in the -# vagrant section in TESTING.asciidoc. +# vagrant section in TESTING.md. # Licensed to Elasticsearch under one or more contributor # license agreements. 
See the NOTICE file distributed with diff --git a/client/rest-high-level/src/test/resources/org/opensearch/client/security/delegate_pki/README.asciidoc b/client/rest-high-level/src/test/resources/org/opensearch/client/security/delegate_pki/README.asciidoc deleted file mode 100644 index 147949bf6d812..0000000000000 --- a/client/rest-high-level/src/test/resources/org/opensearch/client/security/delegate_pki/README.asciidoc +++ /dev/null @@ -1,35 +0,0 @@ -= Certificate Chain details -This document details the steps used to create the certificate chain in this directory. -The chain has a length of 3: the Root CA, the Intermediate CA and the Client Certificate. -All openssl commands use the same configuration file, albeit different sections of it. -The OpenSSL Configuration file is located in this directory as `openssl_config.cnf`. - -== Instructions on generating self-signed Root CA -The self-signed Root CA, 'testRootCA.crt', and its associated private key in this directory -have been generated using the following openssl commands. - -[source,shell] ------------------------------------------------------------------------------------------------------------ -openssl genrsa -out testRootCA.key 2048 -openssl req -x509 -new -key testRootCA.key -days 1460 -subj "/CN=OpenSearch Test Root CA/OU=opensearch/O=org" -out testRootCA.crt -config ./openssl_config.cnf ------------------------------------------------------------------------------------------------------------ - -== Instructions on generating the Intermediate CA -The `testIntermediateCA.crt` CA certificate is "issued" by the `testRootCA.crt`. - -[source,shell] ------------------------------------------------------------------------------------------------------------ -openssl genrsa -out testIntermediateCA.key 2048 -openssl req -new -key testIntermediateCA.key -subj "/CN=OpenSearch Test Intermediate CA/OU=OpenSearch/O=org" -out testIntermediateCA.csr -config ./openssl_config.cnf -openssl x509 -req -in testIntermediateCA.csr -CA testRootCA.crt -CAkey testRootCA.key -CAcreateserial -out testIntermediateCA.crt -days 1460 -sha256 -extensions v3_ca -extfile ./openssl_config.cnf ------------------------------------------------------------------------------------------------------------ - -== Instructions on generating the Client Certificate -The `testClient.crt` end entity certificate is "issued" by the `testIntermediateCA.crt`. - -[source,shell] ------------------------------------------------------------------------------------------------------------ -openssl genrsa -out testClient.key 2048 -openssl req -new -key testClient.key -subj "/CN=OpenSearch Test Client/OU=OpenSearch/O=org" -out testClient.csr -config ./openssl_config.cnf -openssl x509 -req -in testClient.csr -CA testIntermediateCA.crt -CAkey testIntermediateCA.key -CAcreateserial -out testClient.crt -days 1460 -sha256 -extensions usr_cert -extfile ./openssl_config.cnf ------------------------------------------------------------------------------------------------------------ diff --git a/client/rest-high-level/src/test/resources/org/opensearch/client/security/delegate_pki/README.md b/client/rest-high-level/src/test/resources/org/opensearch/client/security/delegate_pki/README.md new file mode 100644 index 0000000000000..f2e1eca606189 --- /dev/null +++ b/client/rest-high-level/src/test/resources/org/opensearch/client/security/delegate_pki/README.md @@ -0,0 +1,24 @@ +All openssl commands use the same configuration file, albeit different sections of it. 
The OpenSSL Configuration file is located in this directory as `openssl_config.cnf`. + +# Instructions on generating self-signed Root CA + +The self-signed Root CA, *testRootCA.crt*, and its associated private key in this directory have been generated using the following openssl commands. + + openssl genrsa -out testRootCA.key 2048 + openssl req -x509 -new -key testRootCA.key -days 1460 -subj "/CN=OpenSearch Test Root CA/OU=opensearch/O=org" -out testRootCA.crt -config ./openssl_config.cnf + +# Instructions on generating the Intermediate CA + +The `testIntermediateCA.crt` CA certificate is "issued" by the `testRootCA.crt`. + + openssl genrsa -out testIntermediateCA.key 2048 + openssl req -new -key testIntermediateCA.key -subj "/CN=OpenSearch Test Intermediate CA/OU=OpenSearch/O=org" -out testIntermediateCA.csr -config ./openssl_config.cnf + openssl x509 -req -in testIntermediateCA.csr -CA testRootCA.crt -CAkey testRootCA.key -CAcreateserial -out testIntermediateCA.crt -days 1460 -sha256 -extensions v3_ca -extfile ./openssl_config.cnf + +# Instructions on generating the Client Certificate + +The `testClient.crt` end entity certificate is "issued" by the `testIntermediateCA.crt`. + + openssl genrsa -out testClient.key 2048 + openssl req -new -key testClient.key -subj "/CN=OpenSearch Test Client/OU=OpenSearch/O=org" -out testClient.csr -config ./openssl_config.cnf + openssl x509 -req -in testClient.csr -CA testIntermediateCA.crt -CAkey testIntermediateCA.key -CAcreateserial -out testClient.crt -days 1460 -sha256 -extensions usr_cert -extfile ./openssl_config.cnf diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/README.asciidoc b/libs/ssl-config/src/test/resources/certs/pem-utils/README.asciidoc deleted file mode 100644 index 6fdbc170992e2..0000000000000 --- a/libs/ssl-config/src/test/resources/certs/pem-utils/README.asciidoc +++ /dev/null @@ -1,175 +0,0 @@ -= Keystore Details -This document details the steps used to create the certificate and keystore files in this directory. - -== Instructions on generating self-signed certificates -The certificates in this directory have been generated using the following openssl configuration and commands. - -OpenSSL Configuration File is located in this directory as `openssl_config.cnf`. - -NOTE: The `alt_names` section provides the Subject Alternative Names for each certificate. This is necessary for testing -with hostname verification enabled. - -[source,shell] ------------------------------------------------------------------------------------------------------------ -openssl req -new -x509 -extensions v3_req -out .cert -keyout .pem -days 1460 -config config.cnf ------------------------------------------------------------------------------------------------------------ - -When prompted the password is always set to the value of . - -Because we intend to import these certificates into a Java Keystore file, they certificate and private key must be combined -in a PKCS12 certificate. - -[source,shell] ------------------------------------------------------------------------------------------------------------ -openssl pkcs12 -export -name -in .cert -inkey .pem -out .p12 ------------------------------------------------------------------------------------------------------------ - -== Creating the Keystore -We need to create a keystore from the created PKCS12 certificate. 
- -[source,shell] ------------------------------------------------------------------------------------------------------------ -keytool -importkeystore -destkeystore .jks -srckeystore .p12 -srcstoretype pkcs12 -alias ------------------------------------------------------------------------------------------------------------ - -The keystore is now created and has the private/public key pair. You can import additional trusted certificates using -`keytool -importcert`. When doing so make sure to specify an alias so that others can recreate the keystore if necessary. - -=== Changes and additions for removing Bouncy Castle Dependency - -`testnode-unprotected.pem` is simply the decrypted `testnode.pem` ------- -openssl rsa -in testnode.pem -out testnode-unprotected.pem ------- - -`rsa_key_pkcs8_plain.pem` is the same plaintext key encoded in `PKCS#8` ------- -openssl pkcs8 -topk8 -inform PEM -outform PEM -in testnode-unprotected.pem -out rsa_key_pkcs8_plain.pem -nocrypt ------- - -`testnode-aes{128,192,256}.pem` is the testnode.pem private key, encrypted with `AES-128`, `AES-192` and `AES-256` -respectively, encoded in `PKCS#1` -[source,shell] ------- -openssl rsa -aes128 -in testnode-unprotected.pem -out testnode-aes128.pem ------- -[source,shell] ------- -openssl rsa -aes192 -in testnode-unprotected.pem -out testnode-aes192.pem ------- -[source,shell] ------- -openssl rsa -aes256 -in testnode-unprotected.pem -out testnode-aes256.pem ------- - -Adding `DSA` and `EC` Keys to the Keystore - -[source,shell] ------- -keytool -genkeypair -keyalg DSA -alias testnode_dsa -keystore testnode.jks -storepass testnode \ - -keypass testnode -validity 10000 -keysize 1024 -dname "CN=OpenSearch Test Node" \ - -ext SAN=dns:localhost,dns:localhost.localdomain,dns:localhost4,dns:localhost4.localdomain4,dns:localhost6,dns:localhost6.localdomain6,ip:127.0.0.1,ip:0:0:0:0:0:0:0:1 ------- -[source,shell] ------- -keytool -genkeypair -keyalg EC -alias testnode_ec -keystore testnode.jks -storepass testnode \ - -keypass testnode -validity 10000 -keysize 256 -dname "CN=OpenSearch Test Node" \ - -ext SAN=dns:localhost,dns:localhost.localdomain,dns:localhost4,dns:localhost4.localdomain4,dns:localhost6,dns:localhost6.localdomain6,ip:127.0.0.1,ip:0:0:0:0:0:0:0:1 ------- - -Exporting the `DSA` and `EC` private keys from the keystore - -[source,shell] ----- -keytool -importkeystore -srckeystore testnode.jks -destkeystore dsa.p12 -deststoretype PKCS12 \ - -srcalias testnode_dsa -deststorepass testnode -destkeypass testnode ----- -[source,shell] ----- -openssl pkcs12 -in dsa.p12 -nodes -nocerts | openssl pkcs8 -topk8 -nocrypt -outform pem \ - -out dsa_key_pkcs8_plain.pem ----- -[source,shell] ----- -keytool -importkeystore -srckeystore testnode.jks -destkeystore ec.p12 -deststoretype PKCS12 \ - -srcalias testnode_ec -deststorepass testnode -destkeypass testnode ----- -[source,shell] ----- -openssl pkcs12 -in ec.p12 -nodes -nocerts | openssl pkcs8 -topk8 -nocrypt -outform pem \ - -out ec_key_pkcs8_plain.pem ----- - - - -Create `PKCS#8` encrypted key from the encrypted `PKCS#1` encoded `testnode.pem` -[source,shell] ------ -openssl pkcs8 -topk8 -inform PEM -outform PEM -in testnode.pem -out key_pkcs8_encrypted.pem ------ -[source,shell] ------ -ssh-keygen -t ed25519 -f key_unsupported.pem ------ - - -Convert `prime256v1-key-noparam.pem` to `PKCS#8` format ------ -openssl pkcs8 -topk8 -in prime256v1-key-noparam.pem -nocrypt -out prime256v1-key-noparam-pkcs8.pem ------ - -Generate the keys and self-signed certificates in 
`nodes/self/` : - ------- -openssl req -newkey rsa:2048 -keyout n1.c1.key -x509 -days 3650 -subj "/CN=n1.c1" -reqexts SAN \ - -extensions SAN -config <(cat /etc/ssl/openssl.cnf \ - <(printf "[SAN]\nsubjectAltName=otherName.1:2.5.4.3;UTF8:node1.cluster1")) -out n1.c1.crt ------- - - -Create a `CA` keypair for testing -[source,shell] ------ -openssl req -newkey rsa:2048 -nodes -keyout ca.key -x509 -subj "/CN=certAuth" -days 10000 -out ca.crt ------ - -Generate Certificates signed with our CA for testing -[source,shell] ------- - openssl req -new -newkey rsa:2048 -keyout n2.c2.key -reqexts SAN -extensions SAN \ - -config <(cat /etc/ssl/openssl.cnf <(printf "[SAN]\nsubjectAltName=otherName.1:2.5.4.3;UTF8:node2.cluster2"))\ - -out n2.c2.csr ------- - -[source,shell] ------- -openssl x509 -req -in n2.c2.csr -extensions SAN -CA ca.crt -CAkey ca.key -CAcreateserial \ - -extfile <(cat /etc/ssl/openssl.cnf <(printf "[SAN]\nsubjectAltName=otherName.1:2.5.4.3;UTF8:node2.cluster2"))\ - -out n2.c2.crt -days 10000 ------- - -== Generate EC keys using various curves for testing - -[source,shell] -------- -openssl ecparam -list_curves -------- - -will list all the available curves in a given system. -For the purposes of the tests here, the following curves were used to generate ec keys named accordingly: - -[source,shell] -------- -openssl ecparam -name secp256r1 -genkey -out private_secp256r1.pem -openssl ecparam -name secp384r1 -genkey -out private_secp384r1.pem -openssl ecparam -name secp521r1 -genkey -out private_secp521r1.pem -------- - -and the respective certificates - -[source,shell] -------- -openssl req -x509 -extensions v3_req -key private_secp256r1.pem -out certificate_secp256r1.pem -days 1460 -config openssl_config.cnf -openssl req -x509 -extensions v3_req -key private_secp384r1.pem -out certificate_secp384r1.pem -days 1460 -config openssl_config.cnf -openssl req -x509 -extensions v3_req -key private_secp521r1.pem -out certificate_secp521r1.pem -days 1460 -config openssl_config.cnf -------- diff --git a/libs/ssl-config/src/test/resources/certs/pem-utils/README.md b/libs/ssl-config/src/test/resources/certs/pem-utils/README.md new file mode 100644 index 0000000000000..28602ac097f78 --- /dev/null +++ b/libs/ssl-config/src/test/resources/certs/pem-utils/README.md @@ -0,0 +1,125 @@ +# Instructions on generating self-signed certificates + +The certificates in this directory have been generated using the +following openssl configuration and commands. + +OpenSSL Configuration File is located in this directory as +`openssl_config.cnf`. + +The `alt_names` section provides the Subject Alternative Names for each +certificate. This is necessary for testing with hostname verification +enabled. + + openssl req -new -x509 -extensions v3_req -out .cert -keyout .pem -days 1460 -config config.cnf + +When prompted the password is always set to the value of <NAME>. + +Because we intend to import these certificates into a Java Keystore +file, they certificate and private key must be combined in a PKCS12 +certificate. + + openssl pkcs12 -export -name -in .cert -inkey .pem -out .p12 + +# Creating the Keystore + +We need to create a keystore from the created PKCS12 certificate. + + keytool -importkeystore -destkeystore .jks -srckeystore .p12 -srcstoretype pkcs12 -alias + +The keystore is now created and has the private/public key pair. You can +import additional trusted certificates using `keytool -importcert`. When +doing so make sure to specify an alias so that others can recreate the +keystore if necessary. 
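For instance, the `ca.crt` certificate generated further below in this document could be imported as a trusted entry under an explicit alias (a minimal sketch, reusing the `testnode.jks` keystore and the `testnode` password convention from this directory; adjust the file name and alias as needed):

    keytool -importcert -trustcacerts -alias testca -file ca.crt \
        -keystore testnode.jks -storepass testnode -noprompt

The `-noprompt` flag skips the interactive confirmation, which is convenient when regenerating these test fixtures from a script.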
+ +## Changes and additions for removing Bouncy Castle Dependency + +`testnode-unprotected.pem` is simply the decrypted `testnode.pem` + + openssl rsa -in testnode.pem -out testnode-unprotected.pem + +`rsa_key_pkcs8_plain.pem` is the same plaintext key encoded in `PKCS#8` + + openssl pkcs8 -topk8 -inform PEM -outform PEM -in testnode-unprotected.pem -out rsa_key_pkcs8_plain.pem -nocrypt + +`testnode-aes{128,192,256}.pem` is the testnode.pem private key, +encrypted with `AES-128`, `AES-192` and `AES-256` respectively, encoded +in `PKCS#1` + + openssl rsa -aes128 -in testnode-unprotected.pem -out testnode-aes128.pem + + openssl rsa -aes192 -in testnode-unprotected.pem -out testnode-aes192.pem + + openssl rsa -aes256 -in testnode-unprotected.pem -out testnode-aes256.pem + +Adding `DSA` and `EC` Keys to the Keystore + + keytool -genkeypair -keyalg DSA -alias testnode_dsa -keystore testnode.jks -storepass testnode \ + -keypass testnode -validity 10000 -keysize 1024 -dname "CN=OpenSearch Test Node" \ + -ext SAN=dns:localhost,dns:localhost.localdomain,dns:localhost4,dns:localhost4.localdomain4,dns:localhost6,dns:localhost6.localdomain6,ip:127.0.0.1,ip:0:0:0:0:0:0:0:1 + + keytool -genkeypair -keyalg EC -alias testnode_ec -keystore testnode.jks -storepass testnode \ + -keypass testnode -validity 10000 -keysize 256 -dname "CN=OpenSearch Test Node" \ + -ext SAN=dns:localhost,dns:localhost.localdomain,dns:localhost4,dns:localhost4.localdomain4,dns:localhost6,dns:localhost6.localdomain6,ip:127.0.0.1,ip:0:0:0:0:0:0:0:1 + +Exporting the `DSA` and `EC` private keys from the keystore + + keytool -importkeystore -srckeystore testnode.jks -destkeystore dsa.p12 -deststoretype PKCS12 \ + -srcalias testnode_dsa -deststorepass testnode -destkeypass testnode + + openssl pkcs12 -in dsa.p12 -nodes -nocerts | openssl pkcs8 -topk8 -nocrypt -outform pem \ + -out dsa_key_pkcs8_plain.pem + + keytool -importkeystore -srckeystore testnode.jks -destkeystore ec.p12 -deststoretype PKCS12 \ + -srcalias testnode_ec -deststorepass testnode -destkeypass testnode + + openssl pkcs12 -in ec.p12 -nodes -nocerts | openssl pkcs8 -topk8 -nocrypt -outform pem \ + -out ec_key_pkcs8_plain.pem + +Create `PKCS#8` encrypted key from the encrypted `PKCS#1` encoded +`testnode.pem` + + openssl pkcs8 -topk8 -inform PEM -outform PEM -in testnode.pem -out key_pkcs8_encrypted.pem + + ssh-keygen -t ed25519 -f key_unsupported.pem + +Convert `prime256v1-key-noparam.pem` to `PKCS#8` format + + openssl pkcs8 -topk8 -in prime256v1-key-noparam.pem -nocrypt -out prime256v1-key-noparam-pkcs8.pem + +Generate the keys and self-signed certificates in `nodes/self/` : + + openssl req -newkey rsa:2048 -keyout n1.c1.key -x509 -days 3650 -subj "/CN=n1.c1" -reqexts SAN \ + -extensions SAN -config <(cat /etc/ssl/openssl.cnf \ + <(printf "[SAN]\nsubjectAltName=otherName.1:2.5.4.3;UTF8:node1.cluster1")) -out n1.c1.crt + +Create a `CA` keypair for testing + + openssl req -newkey rsa:2048 -nodes -keyout ca.key -x509 -subj "/CN=certAuth" -days 10000 -out ca.crt + +Generate Certificates signed with our CA for testing + +  openssl req -new -newkey rsa:2048 -keyout n2.c2.key -reqexts SAN -extensions SAN \ + -config <(cat /etc/ssl/openssl.cnf <(printf "[SAN]\nsubjectAltName=otherName.1:2.5.4.3;UTF8:node2.cluster2"))\ + -out n2.c2.csr + + openssl x509 -req -in n2.c2.csr -extensions SAN -CA ca.crt -CAkey ca.key -CAcreateserial \ + -extfile <(cat /etc/ssl/openssl.cnf <(printf "[SAN]\nsubjectAltName=otherName.1:2.5.4.3;UTF8:node2.cluster2"))\ + -out n2.c2.crt -days 10000 + +# 
Generate EC keys using various curves for testing + + openssl ecparam -list_curves + +will list all the available curves in a given system. For the purposes +of the tests here, the following curves were used to generate ec keys +named accordingly: + + openssl ecparam -name secp256r1 -genkey -out private_secp256r1.pem + openssl ecparam -name secp384r1 -genkey -out private_secp384r1.pem + openssl ecparam -name secp521r1 -genkey -out private_secp521r1.pem + +and the respective certificates + + openssl req -x509 -extensions v3_req -key private_secp256r1.pem -out certificate_secp256r1.pem -days 1460 -config openssl_config.cnf + openssl req -x509 -extensions v3_req -key private_secp384r1.pem -out certificate_secp384r1.pem -days 1460 -config openssl_config.cnf + openssl req -x509 -extensions v3_req -key private_secp521r1.pem -out certificate_secp521r1.pem -days 1460 -config openssl_config.cnf diff --git a/qa/os/README.md b/qa/os/README.md index 85e55fedeac0d..5c95825285870 100644 --- a/qa/os/README.md +++ b/qa/os/README.md @@ -17,7 +17,7 @@ tests should be added to this set of tests if possible. ## Running these tests -See the section in [TESTING.asciidoc](../../TESTING.asciidoc#testing-packaging) +See the section in [TESTING.md](../../TESTING.md#testing-packaging) ## Adding a new test class diff --git a/rest-api-spec/README.markdown b/rest-api-spec/README.md similarity index 100% rename from rest-api-spec/README.markdown rename to rest-api-spec/README.md diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc b/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc deleted file mode 100644 index fbee503e61557..0000000000000 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/README.asciidoc +++ /dev/null @@ -1,482 +0,0 @@ -Test Suite: -=========== - -[NOTE] -.Required settings -======================================= -Certain tests require specific settings to be applied to the -OpenSearch instance in order to pass. You should run -OpenSearch as follows: - -[source,sh] ---------------------- -bin/opensearch -Enode.attr.testattr=test -Epath.repo=/tmp -Erepositories.url.allowed_urls='http://snapshot.*' ---------------------- - -======================================= - -Test file structure --------------------- - -A YAML test file consists of: - -- an optional `setup` section, followed by -- an optional `teardown` section, followed by -- one or more test sections - -For instance: - - setup: - - do: .... - - do: .... - - --- - teardown: - - do: .... - - --- - "First test": - - do: ... - - match: ... - - --- - "Second test": - - do: ... - - match: ... - - -A `setup` section contains a list of commands to run before each test -section in order to setup the same environment for each test section. - -A `teardown` section contains a list of commands to run after each test -section in order to setup the same environment for each test section. This -may be needed for modifications made by the test that are not cleared by the -deletion of indices and templates. - -A test section represents an independent test, containing multiple `do` -statements and assertions. The contents of a test section must be run in -order, but individual test sections may be run in any order, as follows: - -1. run `setup` (if any) -2. reset the `response` var and the `stash` (see below) -2. run test contents -3. run `teardown` (if any) -4. delete all indices and all templates - -Dot notation: -------------- -Dot notation is used for (1) method calls and (2) hierarchical data structures. 
For -instance, a method call like `cluster.health` would do the equivalent of: - - client.cluster.health(...params...) - -A test against `_tokens.1.token` would examine the `token` key, in the second element -of the `tokens` array, inside the `response` var (see below): - - $val = $response->{tokens}[1]{token} # Perl syntax roolz! - -If one of the levels (eg `tokens`) does not exist, it should return an undefined value. -If no field name is given (ie the empty string) then return the current -$val -- used for testing the whole response body. - -Use \. to specify paths that actually contain '.' in the key name, for example -in the `indices.get_settings` API. - -Skipping tests: ---------------- -If a test section should only be run on certain versions of OpenSearch, -then the first entry in the section (after the title) should be called -`skip`, and should contain the range of versions to be -skipped, and the reason why the tests are skipped. For instance: - -.... - "Parent": - - skip: - version: "0.20.1 - 0.90.2" - reason: Delete ignores the parent param - - - do: - ... test definitions ... -.... - -All tests in the file following the skip statement should be skipped if: -`min <= current <= max`. - -The `version` range can leave either bound empty, which means "open ended". -For instance: -.... - "Parent": - - skip: - version: "1.0.0.Beta1 - " - reason: Delete ignores the parent param - - - do: - ... test definitions ... -.... - -The skip section can also be used to list new features that need to be -supported in order to run a test. This way the up-to-date runners will -run the test, while the ones that don't support the feature yet can -temporarily skip it, and avoid having lots of test failures in the meantime. -Once all runners have implemented the feature, it can be declared supported -by default, thus the related skip sections can be removed from the tests. - -.... - "Parent": - - skip: - features: regex - - - do: - ... test definitions ... -.... - -The `features` field can either be a string or an array of strings. -The skip section requires to specify either a `version` or a `features` list. - -=== `catch_unauthorized` - -Runner supports `catch: unauthorized` on a `do` operator. - -=== `default_shards` - -This test can only run if the cluster is running with the distributions default number of shards. - -The Java test runner introduces randomness and sometimes overrides the default number of shards to `2`. -If the default number of shards is changed, test marked with this feature should *not* run - -=== `headers` - -The runner is able to set per request headers on the `do` operation - -=== `node_selector` - -Indicates the runner can parse `node_selector` under the `do` operator and use its metadata to select the node to -perform the `do` operation on. - -=== `stash_in_key` - -Allows you to use a stashed value in any key of an object during a `match` assertion - -.... -- set: {nodes.$master.http.publish_address: host} -- match: - $body: - { - "nodes": { - $host: { - ... stuff in here ... - } - } - } -.... - -=== `stash_in_path` - -Allows a stashed value to be referenced in path lookups as a single token. E.g: - -.... -path.$stash.value -.... - -=== `embedded_stash_key` - -Allows a stashed key to appear anywhere in the path (note the placeholder needs to be within curly brackets too in this case): - -.... -field1.e${placeholder}ments.element1 -.... - -=== `stash_path_replace` -Used only in the doc snippet tests. Allow you to do ease replacements using a special `$_path` marker. 
- -.... -// TESTRESPONSEs/somevalue/$body.${_path}/ to mean "replace -somevalue with whatever is the response in the same position." -.... - -=== `warnings` - -The runner can assert the warnings headers returned by OpenSearch through the `warning:` assertations -under `do:` operations. - -=== `yaml` - -The runner is able to send and receive `application/yaml` and perform all assertions on the returned data. - -=== `contains` - -Asserts an array of object contains an object with a property set to a certain value. e.g: - -... -contains: { nodes.$master.plugins: { name: painless-whitelist } } -... - -Asserts the plugins array contains an object with a `name` property with the value `painless-whitelist` - -=== `transform_and_set` - -Supports the `transform_and_set` operator as described in this document. - -=== `arbitrary_key` - -Allows you to stash an arbitrary key from a returned map e.g: - -.... -- set: - nodes._arbitrary_key_: node_id -.... - -This means: Stash any of the keys returned under `nodes` as `$node_id` - -Required operators: -------------------- - -=== `do` - -The `do` operator calls a method on the client. For instance: - -.... - - do: - cluster.health: - level: shards -.... - -The response from the `do` operator should be stored in the `response` var, which -is reset (1) at the beginning of a file or (2) on the next `do`. - -If the arguments to `do` include `catch`, then we are expecting an error, which should -be caught and tested. For instance: - -.... - - do: - catch: missing - get: - index: test - type: test - id: 1 -.... - -The argument to `catch` can be any of: - -[horizontal] -`bad_request`:: a 400 response from ES -`unauthorized`:: a 401 response from ES -`forbidden`:: a 403 response from ES -`missing`:: a 404 response from ES -`request_timeout`:: a 408 response from ES -`conflict`:: a 409 response from ES -`request`:: a 4xx-5xx error response from ES, not equal to any named response - above -`unavailable`:: a 503 response from ES -`param`:: a client-side error indicating an unknown parameter has been passed - to the method -`/foo bar/`:: the text of the error message matches this regular expression - -If `catch` is specified, then the `response` var must be cleared, and the test -should fail if no error is thrown. - -If the arguments to `do` include `warnings` then we are expecting a `Warning` -header to come back from the request. If the arguments *don't* include a -`warnings` argument then we *don't* expect the response to include a `Warning` -header. The warnings must match exactly. Using it looks like this: - -.... - - do: - warnings: - - '[index] is deprecated' - - quotes are not required because yaml - - but this argument is always a list, never a single string - - no matter how many warnings you expect - get: - index: test - type: test - id: 1 -.... - -If the arguments to `do` include `allowed_warnings` then matching `Warning` -headers do not fail the request. Unlike the `warnings` argument, these aren't -expected so much as "allowed". This usually comes up in backwards compatibility -testing. Using it looks like this: - -.... - - do: - allowed_warnings: - - some warning - - this argument is also always a list, never a single string - - no matter how many warnings you expect - get: - index: test - type: test - id: 1 -.... - -If the arguments to `do` include `node_selector` then the request is only -sent to nodes that match the `node_selector`. It looks like this: - -.... 
-"test id": - - skip: - features: node_selector - - do: - node_selector: - version: " - 6.9.99" - index: - index: test-weird-index-中文 - type: weird.type - id: 1 - body: { foo: bar } -.... - -If you list multiple selectors then the request will only go to nodes that -match all of those selectors. The following selectors are supported: - -- `version`: Only nodes who's version is within the range will receive the -request. The syntax for the pattern is the same as when `version` is within -`skip`. -- `attribute`: Only nodes that have an attribute matching the name and value -of the provided attribute match. -Looks like: -.... - node_selector: - attribute: - name: value -.... - -=== `set` - -For some tests, it is necessary to extract a value from the previous `response`, in -order to reuse it in a subsequent `do` and other tests. For instance, when -testing indexing a document without a specified ID: - -.... - - do: - index: - index: test - type: test - - set: { _id: id } # stash the value of `response._id` as `id` - - do: - get: - index: test - type: test - id: $id # replace `$id` with the stashed value - - match: { _id: $id } # the returned `response._id` matches the stashed `id` -.... - -The last response obtained gets always stashed automatically as a string, called `body`. -This is useful when needing to test apis that return text rather than json (e.g. cat api), -as it allows to treat the whole body as an ordinary string field. - -Stashed values can be used in property names, eg: - -.... - - do: - cluster.state: {} - - - set: {master_node: master} - - - do: - nodes.info: - metric: [ transport ] - - - is_true: nodes.$master.transport.profiles -.... - - -Note that not only expected values can be retrieved from the stashed values (as in the -example above), but the same goes for actual values: - -.... - - match: { $body: /^.+$/ } # the returned `body` matches the provided regex if the body is text - - match: { $body: {} } # the returned `body` matches the JSON object if the body is JSON -.... - -The stash should be reset at the beginning of each test file. - -=== `transform_and_set` - -For some tests, it is necessary to extract a value and transform it from the previous `response`, in -order to reuse it in a subsequent `do` and other tests. -Currently, it only has support for `base64EncodeCredentials`, for unknown transformations it will not -do anything and stash the value as is. -For instance, when testing you may want to base64 encode username and password for -`Basic` authorization header: - -.... - - do: - index: - index: test - type: test - - transform_and_set: { login_creds: "#base64EncodeCredentials(user,password)" } # stash the base64 encoded credentials of `response.user` and `response.password` as `login_creds` - - do: - headers: - Authorization: Basic ${login_creds} # replace `$login_creds` with the stashed value - get: - index: test - type: test -.... - -Stashed values can be used as described in the `set` section - -=== `is_true` - -The specified key exists and has a true value (ie not `0`, `false`, `undefined`, `null` -or the empty string), eg: - -.... - - is_true: fields.foo # the foo key exists in the fields hash and is "true" -.... - -=== `is_false` - -The specified key doesn't exist or has a false value (ie `0`, `false`, `undefined`, -`null` or the empty string), eg: - -.... - - is_false: fields._source # the _source key doesn't exist in the fields hash or is "false" -.... - -=== `match` - -Used to compare two variables (could be scalars, arrays or hashes). 
The two variables -should be identical, eg: - -.... - - match: { _source: { foo: bar }} -.... - -Supports also regular expressions with flag X for more readability (accepts whitespaces and comments): - -.... - - match: - $body: > - /^ epoch \s+ timestamp \s+ count \s+ \n - \d+ \s+ \d{2}:\d{2}:\d{2} \s+ \d+ \s+ \n $/ -.... - -**Note:** `$body` is used to refer to the last obtained response body as a string, while `''` refers to the parsed representation (parsed into a Map by the Java runner for instance). Having the raw string response is for example useful when testing cat APIs. - -=== `lt` and `gt` - -Compares two numeric values, eg: - -.... - - lt: { foo: 10000 } # the `foo` value is less than 10,000 -.... - -=== `lte` and `gte` - -Compares two numeric values, eg: - -.... - - lte: { foo: 10000 } # the `foo` value is less than or equal to 10,000 -.... - -=== `length` - -This depends on the data type of the value being examined, eg: - -.... - - length: { _id: 22 } # the `_id` string is 22 chars long - - length: { _tokens: 3 } # the `_tokens` array has 3 elements - - length: { _source: 5 } # the `_source` hash has 5 keys -.... diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/README.md b/rest-api-spec/src/main/resources/rest-api-spec/test/README.md new file mode 100644 index 0000000000000..cfaba9a2347a6 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/README.md @@ -0,0 +1,400 @@ +# Required settings + +Certain tests require specific settings to be applied to the OpenSearch instance in order to pass. You should run OpenSearch as follows: + + bin/opensearch -Enode.attr.testattr=test -Epath.repo=/tmp -Erepositories.url.allowed_urls='http://snapshot.*' + +# Test file structure + +A YAML test file consists of: + +- an optional `setup` section, followed by +- an optional `teardown` section, followed by +- one or more test sections + +For instance: + + setup: + - do: .... + - do: .... + + --- + teardown: + - do: .... + + --- + "First test": + - do: ... + - match: ... + + --- + "Second test": + - do: ... + - match: ... + +A `setup` section contains a list of commands to run before each test section in order to setup the same environment for each test section. + +A `teardown` section contains a list of commands to run after each test section in order to setup the same environment for each test section. This may be needed for modifications made by the test that are not cleared by the deletion of indices and templates. + +A test section represents an independent test, containing multiple `do` statements and assertions. The contents of a test section must be run in order, but individual test sections may be run in any order, as follows: + +1. run `setup` (if any) +2. reset the `response` var and the `stash` (see below) +3. run test contents +4. run `teardown` (if any) +5. delete all indices and all templates + +# Dot notation: + +Dot notation is used for (1) method calls and (2) hierarchical data structures. For instance, a method call like `cluster.health` would do the equivalent of: + + client.cluster.health(...params...) + +A test against `_tokens.1.token` would examine the `token` key, in the second element of the `tokens` array, inside the `response` var (see below): + + $val = $response->{tokens}[1]{token} # Perl syntax roolz! + +If one of the levels (eg `tokens`) does not exist, it should return an undefined value. If no field name is given (ie the empty string) then return the current $val — used for testing the whole response body. + +Use \\. 
to specify paths that actually contain *.* in the key name, for example in the `indices.get_settings` API. + +# Skipping tests: + +If a test section should only be run on certain versions of OpenSearch, then the first entry in the section (after the title) should be called `skip`, and should contain the range of versions to be skipped, and the reason why the tests are skipped. For instance: + + "Parent": + - skip: + version: "0.20.1 - 0.90.2" + reason: Delete ignores the parent param + + - do: + ... test definitions ... + +All tests in the file following the skip statement should be skipped if: `min <= current <= max`. + +The `version` range can leave either bound empty, which means "open ended". For instance: + + "Parent": + - skip: + version: "1.0.0.Beta1 - " + reason: Delete ignores the parent param + + - do: + ... test definitions ... + +The skip section can also be used to list new features that need to be supported in order to run a test. This way the up-to-date runners will run the test, while the ones that don’t support the feature yet can temporarily skip it, and avoid having lots of test failures in the meantime. Once all runners have implemented the feature, it can be declared supported by default, thus the related skip sections can be removed from the tests. + + "Parent": + - skip: + features: regex + + - do: + ... test definitions ... + +The `features` field can either be a string or an array of strings. The skip section requires to specify either a `version` or a `features` list. + +## `catch_unauthorized` + +Runner supports `catch: unauthorized` on a `do` operator. + +## `default_shards` + +This test can only run if the cluster is running with the distributions default number of shards. + +The Java test runner introduces randomness and sometimes overrides the default number of shards to `2`. If the default number of shards is changed, test marked with this feature should **not** run. + +## `headers` + +The runner is able to set per request headers on the `do` operation. + +## `node_selector` + +Indicates the runner can parse `node_selector` under the `do` operator and use its metadata to select the node to perform the `do` operation on. + +## `stash_in_key` + +Allows you to use a stashed value in any key of an object during a `match` assertion + + - set: {nodes.$master.http.publish_address: host} + - match: + $body: + { + "nodes": { + $host: { + ... stuff in here ... + } + } + } + +## `stash_in_path` + +Allows a stashed value to be referenced in path lookups as a single token. E.g: + + path.$stash.value + +## `embedded_stash_key` + +Allows a stashed key to appear anywhere in the path (note the placeholder needs to be within curly brackets too in this case): + + field1.e${placeholder}ments.element1 + +## `stash_path_replace` + +Used only in the doc snippet tests. Allow you to do ease replacements using a special `$_path` marker. + + // TESTRESPONSEs/somevalue/$body.${_path}/ to mean "replace + somevalue with whatever is the response in the same position." + +## `warnings` + +The runner can assert the warnings headers returned by OpenSearch through the `warning:` assertations under `do:` operations. + +## `yaml` + +The runner is able to send and receive `application/yaml` and perform all assertions on the returned data. + +## `contains` + +Asserts an array of object contains an object with a property set to a certain value. 
e.g: + +… contains: { nodes.$master.plugins: { name: painless-whitelist } } … + +Asserts the plugins array contains an object with a `name` property with the value `painless-whitelist` + +## `transform_and_set` + +Supports the `transform_and_set` operator as described in this document. + +## `arbitrary_key` + +Allows you to stash an arbitrary key from a returned map e.g: + + - set: + nodes._arbitrary_key_: node_id + +This means: Stash any of the keys returned under `nodes` as `$node_id` + +# Required operators: + +## `do` + +The `do` operator calls a method on the client. For instance: + + - do: + cluster.health: + level: shards + +The response from the `do` operator should be stored in the `response` var, which is reset (1) at the beginning of a file or (2) on the next `do`. + +If the arguments to `do` include `catch`, then we are expecting an error, which should be caught and tested. For instance: + + - do: + catch: missing + get: + index: test + type: test + id: 1 + +The argument to `catch` can be any of: + + ++++ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

- `bad_request`: a 400 response from ES
- `unauthorized`: a 401 response from ES
- `forbidden`: a 403 response from ES
- `missing`: a 404 response from ES
- `request_timeout`: a 408 response from ES
- `conflict`: a 409 response from ES
- `request`: a 4xx-5xx error response from ES, not equal to any named response above
- `unavailable`: a 503 response from ES
- `param`: a client-side error indicating an unknown parameter has been passed to the method
- `/foo bar/`: the text of the error message matches this regular expression
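For instance, the regular expression form could be used to assert on the error text itself (a hypothetical sketch; the API call, index name, and error message are illustrative only):

    - do:
        catch: /index_not_found_exception/
        indices.get:
          index: missing_index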

+ +If `catch` is specified, then the `response` var must be cleared, and the test should fail if no error is thrown. + +If the arguments to `do` include `warnings` then we are expecting a `Warning` header to come back from the request. If the arguments **don’t** include a `warnings` argument then we **don’t** expect the response to include a `Warning` header. The warnings must match exactly. Using it looks like this: + + - do: + warnings: + - '[index] is deprecated' + - quotes are not required because yaml + - but this argument is always a list, never a single string + - no matter how many warnings you expect + get: + index: test + type: test + id: 1 + +If the arguments to `do` include `allowed_warnings` then matching `Warning` headers do not fail the request. Unlike the `warnings` argument, these aren’t expected so much as "allowed". This usually comes up in backwards compatibility testing. Using it looks like this: + + - do: + allowed_warnings: + - some warning + - this argument is also always a list, never a single string + - no matter how many warnings you expect + get: + index: test + type: test + id: 1 + +If the arguments to `do` include `node_selector` then the request is only sent to nodes that match the `node_selector`. It looks like this: + + "test id": + - skip: + features: node_selector + - do: + node_selector: + version: " - 6.9.99" + index: + index: test-weird-index-中文 + type: weird.type + id: 1 + body: { foo: bar } + +If you list multiple selectors then the request will only go to nodes that match all of those selectors. The following selectors are supported: + +- `version`: Only nodes who’s version is within the range will receive the request. The syntax for the pattern is the same as when `version` is within `skip`. +- `attribute`: Only nodes that have an attribute matching the name and value of the provided attribute match. Looks like: + + node_selector: + attribute: + name: value + +## `set` + +For some tests, it is necessary to extract a value from the previous `response`, in order to reuse it in a subsequent `do` and other tests. For instance, when testing indexing a document without a specified ID: + + - do: + index: + index: test + type: test + - set: { _id: id } # stash the value of `response._id` as `id` + - do: + get: + index: test + type: test + id: $id # replace `$id` with the stashed value + - match: { _id: $id } # the returned `response._id` matches the stashed `id` + +The last response obtained gets always stashed automatically as a string, called `body`. This is useful when needing to test apis that return text rather than json (e.g. cat api), as it allows to treat the whole body as an ordinary string field. + +Stashed values can be used in property names, eg: + + - do: + cluster.state: {} + + - set: { master_node: master } + + - do: + nodes.info: + metric: [ transport ] + + - is_true: nodes.$master.transport.profiles + +Note that not only expected values can be retrieved from the stashed values (as in the example above), but the same goes for actual values: + + - match: { $body: /^.+$/ } # the returned `body` matches the provided regex if the body is text + - match: { $body: {} } # the returned `body` matches the JSON object if the body is JSON + +The stash should be reset at the beginning of each test file. + +## `transform_and_set` + +For some tests, it is necessary to extract a value and transform it from the previous `response`, in order to reuse it in a subsequent `do` and other tests. 
Currently, it only has support for `base64EncodeCredentials`, for unknown transformations it will not do anything and stash the value as is. For instance, when testing you may want to base64 encode username and password for `Basic` authorization header: + + - do: + index: + index: test + type: test + - transform_and_set: { login_creds: "#base64EncodeCredentials(user,password)" } # stash the base64 encoded credentials of `response.user` and `response.password` as `login_creds` + - do: + headers: + Authorization: Basic ${login_creds} # replace `$login_creds` with the stashed value + get: + index: test + type: test + +Stashed values can be used as described in the `set` section + +## `is_true` + +The specified key exists and has a true value (ie not `0`, `false`, `undefined`, `null` or the empty string), eg: + + - is_true: fields.foo # the foo key exists in the fields hash and is "true" + +## `is_false` + +The specified key doesn’t exist or has a false value (ie `0`, `false`, `undefined`, `null` or the empty string), eg: + + - is_false: fields._source # the _source key doesn't exist in the fields hash or is "false" + +## `match` + +Used to compare two variables (could be scalars, arrays or hashes). The two variables should be identical, eg: + + - match: { _source: { foo: bar } } + +Supports also regular expressions with flag X for more readability (accepts whitespaces and comments): + + - match: + $body: > + /^ epoch \s+ timestamp \s+ count \s+ \n + \d+ \s+ \d{2}:\d{2}:\d{2} \s+ \d+ \s+ \n $/ + +**Note:** `$body` is used to refer to the last obtained response body as a string, while `''` refers to the parsed representation (parsed into a Map by the Java runner for instance). Having the raw string response is for example useful when testing cat APIs. + +## `lt` and `gt` + +Compares two numeric values, eg: + + - lt: { foo: 10000 } # the `foo` value is less than 10,000 + +## `lte` and `gte` + +Compares two numeric values, eg: + + - lte: { foo: 10000 } # the `foo` value is less than or equal to 10,000 + +## `length` + +This depends on the data type of the value being examined, eg: + + - length: { _id: 22 } # the `_id` string is 22 chars long + - length: { _tokens: 3 } # the `_tokens` array has 3 elements + - length: { _source: 5 } # the `_source` hash has 5 keys
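
Putting several of the operators above together, a hypothetical test section might look like the following (the index name, document, and expected values are illustrative only, and assume exactly one indexed document):

    "Assertions example":
      - do:
          index:
            index: test
            id:    1
            body:  { foo: bar }

      - do:
          indices.refresh: {}

      - do:
          search:
            index: test

      - match:  { hits.hits.0._source.foo: bar }
      - gte:    { hits.total.value: 1 }
      - length: { hits.hits: 1 }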